repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
kwitnacy/wti
|
[
"9ee659bf2912f5b2fe6229bbda5074d4f3ebb3d4"
] |
[
"wtiproj04/API.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom typing import List\n\n\ndef my_join() -> List[str]:\n df_main = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/user_ratedmovies.dat', sep='\\t')\n df_genres = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/movie_genres.dat', sep='\\t')\n df_genres['Dummy'] = 1\n\n df_pivoted = df_genres.pivot_table(index='movieID', columns='genre', values='Dummy')\n joined = df_main.join(df_pivoted, on='movieID')\n joined[np.isnan] = 0\n joined.to_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/joined.dat', index=False, sep='\\t')\n\n return list(df_pivoted.columns.values)\n\n\ndef my_to_dict() -> List[dict]:\n joined = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/joined.dat', sep='\\t')\n\n return joined.to_dict('records')\n\n\ndef my_from_dict_to_dataframe(l: List[dict] = []) -> pd.DataFrame:\n return pd.DataFrame.from_records(l)\n\n\ndef get_avg_rating_genre() -> np.array:\n # head = my_join()\n head = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'IMAX', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Short', 'Thriller', 'War', 'Western']\n\n joined = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/joined.dat', sep='\\t')\n joined = joined.to_numpy()\n\n avg = [np.nanmean([(row[2] * row[genre + 9]) if row[genre + 9] != 0 else np.nan for row in joined]) for genre in range(len(head))]\n\n pd.DataFrame([avg], columns=head).to_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj04/data/avg_rating.dat', index=False, sep='\\t')\n\n print('dict:')\n print({x: y for x, y in zip(head, avg)})\n \n return np.array(avg)\n\n\ndef get_avg_rating_genre_user(user_ID: int = 0) -> np.array:\n query = 'userID == ' + str(user_ID)\n # head = my_join()\n head = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'IMAX', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Short', 'Thriller', 'War', 'Western']\n\n joined = pd.read_csv('/home/kwitnoncy/Documents/politechnika/wti/wtiproj03/data/joined.dat', sep='\\t') \n joined = joined.query(query).to_numpy()\n\n return np.array([np.nanmean([(row[2] * row[genre + 9]) if row[genre + 9] != 0 else np.nan for row in joined]) for genre in range(len(head))])\n\n\ndef get_user_profile(user_ID: int = 0) -> np.array:\n avg = get_avg_rating_genre()\n user_avg = get_avg_rating_genre_user(user_ID)\n \n return np.nan_to_num(avg - user_avg)\n"
] |
[
[
"pandas.read_csv",
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.nanmean",
"pandas.DataFrame.from_records",
"numpy.array"
]
] |
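The kwitnacy/wti file above one-hot encodes movie genres by adding a `Dummy` column and pivoting it, then joins the result onto the ratings table. A minimal sketch of that pattern with made-up data (the `fillna(0)` call stands in for the file's NaN-zeroing step):

```python
import pandas as pd

# Hypothetical miniature versions of user_ratedmovies.dat and movie_genres.dat
ratings = pd.DataFrame({'userID': [75, 75, 78],
                        'movieID': [3, 32, 3],
                        'rating': [1.0, 4.5, 3.0]})
genres = pd.DataFrame({'movieID': [3, 3, 32],
                       'genre': ['Comedy', 'Romance', 'Sci-Fi']})

genres['Dummy'] = 1                       # constant column to pivot into 0/1 indicators
pivoted = genres.pivot_table(index='movieID', columns='genre', values='Dummy')
joined = ratings.join(pivoted, on='movieID').fillna(0)   # NaN -> 0, as in the original script

print(list(pivoted.columns.values))       # genre headers, as returned by my_join()
print(joined)
```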
yang131313/Paddle
|
[
"c197d73be1babe64d93b714701b21066d6645482"
] |
[
"python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nimport abc\nimport os\nimport enum\nimport time\nimport logging\nimport shutil\nimport paddle\nimport paddle.fluid as fluid\nfrom paddle.fluid.initializer import NumpyArrayInitializer\nfrom paddle.fluid.core import PassVersionChecker\nimport paddle.fluid.core as core\nfrom paddle import compat as cpt\nimport paddle.inference as paddle_infer\nfrom typing import Optional, List, Callable, Dict, Any, Set\nfrom program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model\n\nimport hypothesis\nfrom hypothesis import given, settings, seed, reproduce_failure\nimport hypothesis.strategies as st\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\nsettings.register_profile(\n \"ci\",\n max_examples=100,\n suppress_health_check=hypothesis.HealthCheck.all(),\n deadline=None,\n print_blob=True,\n derandomize=True,\n report_multiple_bugs=False)\nsettings.register_profile(\n \"dev\",\n max_examples=1000,\n suppress_health_check=hypothesis.HealthCheck.all(),\n deadline=None,\n print_blob=True,\n derandomize=True,\n report_multiple_bugs=False)\nif float(os.getenv('TEST_NUM_PERCENT_CASES', default='1.0')) < 1 or \\\n os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci':\n settings.load_profile(\"ci\")\nelse:\n settings.load_profile(\"dev\")\n\n\nclass IgnoreReasons(enum.Enum):\n # Paddle not support, but trt support, we need to add the feature.\n TRT_NOT_IMPLEMENTED = 0\n # TRT not support.\n TRT_NOT_SUPPORT = 1\n # Accuracy is abnormal after enabling pass.\n PASS_ACCURACY_ERROR = 2\n # Accuracy is abnormal after enabling mkldnn.\n MKLDNN_ACCURACY_ERROR = 3\n\n\n# TODO(wilber): just for backward compatible\nSkipReasons = IgnoreReasons\n\n\nclass AutoScanTest(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n np.random.seed(1024)\n paddle.enable_static()\n super(AutoScanTest, self).__init__(*args, **kwargs)\n self.ignore_cases = []\n abs_dir = os.path.abspath(os.path.dirname(__file__))\n self.cache_dir = os.path.join(abs_dir,\n str(self.__module__) + '_cache_dir')\n self.available_passes_in_framework = set()\n self.num_ran_programs = 0\n self.num_invalid_programs = 0\n self.num_ignore_tests = 0\n self.num_predictor_kinds = 0\n\n @abc.abstractmethod\n def sample_program_configs(self):\n '''\n Generate all config with the combination of different Input tensor shape and\n different Attr values.\n '''\n raise NotImplementedError\n\n @abc.abstractmethod\n def sample_predictor_configs(self):\n raise NotImplementedError\n\n @abc.abstractmethod\n def add_ignore_check_case(\n self,\n teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],\n reason: IgnoreReasons,\n note: str):\n self.ignore_cases.append((teller, reason, note))\n\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n return True\n\n def run_test_config(self, model, params, prog_config, pred_config,\n feed_data) -> 
Dict[str, np.ndarray]:\n '''\n Test a single case.\n '''\n pred_config.set_model_buffer(model, len(model), params, len(params))\n predictor = paddle_infer.create_predictor(pred_config)\n self.available_passes_in_framework = self.available_passes_in_framework | set(\n pred_config.pass_builder().all_passes())\n for name, _ in prog_config.inputs.items():\n input_tensor = predictor.get_input_handle(name)\n input_tensor.copy_from_cpu(feed_data[name]['data'])\n if feed_data[name]['lod'] is not None:\n input_tensor.set_lod(feed_data[name]['lod'])\n predictor.run()\n result = {}\n for out_name, o_name in zip(prog_config.outputs,\n predictor.get_output_names()):\n result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu()\n return result\n\n @abc.abstractmethod\n def assert_tensors_near(self,\n atol: float,\n rtol: float,\n tensor: Dict[str, np.array],\n baseline: Dict[str, np.array]):\n for key, arr in tensor.items():\n self.assertTrue(\n baseline[key].shape == arr.shape,\n \"The output shapes are not equal, the baseline shape is \" +\n str(baseline[key].shape) + ', but got ' + str(arr.shape))\n diff = abs(baseline[key] - arr)\n self.assertTrue(\n np.allclose(\n baseline[key], arr, atol=atol, rtol=rtol),\n \"Output has diff, Maximum absolute error: {}\".format(\n np.amax(diff)))\n\n @abc.abstractmethod\n def run_test(self, quant=False):\n raise NotImplementedError\n\n def generate_op_config(self,\n ops_config: List[Dict[str, Any]]) -> List[OpConfig]:\n ops = []\n for i in range(len(ops_config)):\n op_config = ops_config[i]\n ops.append(\n OpConfig(\n type=op_config['op_type'],\n inputs=op_config['op_inputs'],\n outputs=op_config['op_outputs'],\n attrs=op_config['op_attrs']))\n return ops\n\n @abc.abstractmethod\n def ignore_log(self, msg: str):\n logging.warning(\"SKIP: \" + msg)\n\n @abc.abstractmethod\n def fail_log(self, msg: str):\n logging.error(\"FAIL: \" + msg)\n\n @abc.abstractmethod\n def success_log(self, msg: str):\n logging.info(\"SUCCESS: \" + msg)\n\n @abc.abstractmethod\n def create_inference_config(self,\n passes: Optional[List[str]]=None,\n use_gpu: bool=False,\n use_mkldnn: bool=False,\n ir_optim: Optional[bool]=None):\n config = paddle_infer.Config()\n config.switch_ir_debug(True)\n config.set_optim_cache_dir(self.cache_dir)\n config.disable_glog_info()\n if ir_optim is not None:\n config.switch_ir_optim(ir_optim)\n if use_gpu:\n config.enable_use_gpu(100, 0)\n if use_mkldnn:\n config.enable_mkldnn()\n if passes is not None:\n config.pass_builder().set_passes(passes)\n self.passes = passes\n return config\n\n\nclass MkldnnAutoScanTest(AutoScanTest):\n def __init__(self, *args, **kwargs):\n super(MkldnnAutoScanTest, self).__init__(*args, **kwargs)\n\n def run_test(self, quant=False, *args, **kwargs):\n status = True\n\n for prog_config in self.sample_program_configs(*args, **kwargs):\n # if program is invalid, we should skip that cases.\n if not self.is_program_valid(prog_config):\n continue\n\n model, params = create_fake_model(prog_config)\n if quant:\n model, params = create_quant_model(model, params)\n\n feed_data = {}\n for name, tensor_config in prog_config.inputs.items():\n feed_data[name] = {\n 'data': tensor_config.data,\n 'lod': tensor_config.lod\n }\n results: List[Dict[str, np.ndarray]] = []\n\n # baseline: cpu no ir_optim run\n base_config = self.create_inference_config(ir_optim=False)\n logging.info('RUN program_config: ' + str(prog_config))\n results.append(\n self.run_test_config(model, params, prog_config, base_config,\n feed_data))\n 
self.success_log('RUN_CPU_BASELINE done')\n\n for pred_config, (\n atol, rtol) in self.sample_predictor_configs(prog_config):\n # skip info\n ignore_flag = False\n for ignore_info in self.ignore_cases:\n if ignore_info[0](prog_config, pred_config):\n ignore_flag = True\n if ignore_info[\n 1] == IgnoreReasons.MKLDNN_ACCURACY_ERROR:\n self.ignore_log(\"[MKLDNN_ACCURACY_ERROR] \" +\n ignore_info[2] + ' ' + ' vs ' +\n self.inference_config_str(\n pred_config))\n else:\n raise NotImplementedError\n break\n\n if os.path.exists(self.cache_dir):\n shutil.rmtree(self.cache_dir)\n if not os.path.exists(self.cache_dir):\n os.mkdir(self.cache_dir)\n\n try:\n results.append(\n self.run_test_config(model, params, prog_config,\n pred_config, feed_data))\n self.assert_tensors_near(atol, rtol, results[-1],\n results[0])\n except Exception as e:\n self.fail_log(\n self.inference_config_str(pred_config) +\n '\\033[1;31m \\nERROR INFO: {}\\033[0m'.format(str(e)))\n if not ignore_flag:\n status = False\n continue\n self.success_log('RUN predictor_config ' + self.\n inference_config_str(pred_config) + ' done')\n\n self.assertTrue(status)\n\n def inference_config_str(self, config) -> str:\n dic = {}\n enable_mkldnn = config.mkldnn_enabled()\n dic['use_mkldnn'] = enable_mkldnn\n enable_gpu = config.use_gpu()\n dic['use_gpu'] = enable_gpu\n return str(dic)\n\n\nclass PassAutoScanTest(AutoScanTest):\n def __init__(self, *args, **kwargs):\n super(PassAutoScanTest, self).__init__(*args, **kwargs)\n self.passes = []\n\n def check_op_version(self):\n status = True\n for pass_name in self.passes:\n if pass_name not in self.available_passes_in_framework:\n continue\n if not PassVersionChecker.IsCompatible(pass_name):\n self.fail_log('{} version check failed.'.format(pass_name))\n status = False\n return status\n\n def add_ignore_pass_case(self):\n return\n\n def assert_op_list(self, op_list_after_fusion):\n if not self.passes:\n raise ValueError(\n \"In PassAutoScan you should give a valid pass name.\")\n last_passed_program = os.path.join(self.cache_dir,\n self.passes[-1] + \".pdmodel\")\n if not os.path.exists(last_passed_program):\n raise ValueError(\n \"Cannot find file {}, please make sure that your pass name is correct\".\n format(last_passed_program))\n model_bytes = paddle.static.load_from_file(last_passed_program)\n pg = paddle.static.deserialize_program(model_bytes)\n main_block = pg.desc.block(0)\n after_op_list = list()\n for i in range(main_block.op_size()):\n if main_block.op(i).type() in [\"feed\", \"fetch\"]:\n continue\n after_op_list.append(main_block.op(i).type())\n self.assertTrue(\n op_list_after_fusion == after_op_list,\n \"Expected operator list after fusion is {}, but now it's {}\".format(\n op_list_after_fusion, after_op_list), )\n\n def run_and_statis(self,\n quant=False,\n max_examples=100,\n reproduce=None,\n min_success_num=25,\n max_duration=180,\n passes=None):\n if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == \"dev\":\n max_examples *= 10\n min_success_num *= 10\n # while at ce phase, there's no limit on time\n max_duration = -1\n start_time = time.time()\n settings.register_profile(\n \"ci\",\n max_examples=max_examples,\n suppress_health_check=hypothesis.HealthCheck.all(),\n deadline=None,\n print_blob=True,\n derandomize=True,\n report_multiple_bugs=False, )\n settings.load_profile(\"ci\")\n assert passes is not None, \"Parameter of passes must be defined in function run_and_statis.\"\n self.passes = passes\n\n self.add_ignore_pass_case()\n\n def program_generator(draw):\n return 
self.sample_program_config(draw)\n\n def run_test(prog_config):\n return self.run_test(quant=quant, prog_configs=[prog_config])\n\n generator = st.composite(program_generator)\n loop_func = given(generator())(run_test)\n if reproduce is not None:\n loop_func = reproduce(loop_func)\n logging.info(\"Start to running test of {}\".format(type(self)))\n loop_func()\n logging.info(\n \"===================Statistical Information===================\")\n logging.info(\"Number of Generated Programs: {}\".format(\n self.num_ran_programs + self.num_invalid_programs))\n logging.info(\"Number of Invalid Programs: {}\".format(\n self.num_invalid_programs))\n logging.info(\"Number of Ran Programs: {}\".format(self.num_ran_programs))\n logging.info(\"Number of Ignore Tests: {}\".format(self.num_ignore_tests))\n successful_ran_programs = int(self.num_ran_programs -\n self.num_ignore_tests / max(\n self.num_predictor_kinds, 1))\n logging.info(\n \"Number of successfully ran programs approximately equal to {}\".\n format(successful_ran_programs))\n if successful_ran_programs < min_success_num:\n logging.warning(\n \"satisfied_programs = ran_programs - num_ignore_tests / num_predictor_kinds\"\n )\n logging.error(\n \"At least {} programs need to ran successfully, but now only about {} programs satisfied.\".\n format(min_success_num, successful_ran_programs))\n assert False\n used_time = time.time() - start_time\n if max_duration > 0 and used_time > max_duration:\n logging.error(\n \"The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`.\".\n format(max_duration))\n assert False\n\n def run_test(self, quant=False, prog_configs=None):\n status = True\n\n for prog_config in prog_configs:\n # if program is invalid, we should skip that cases.\n if not self.is_program_valid(prog_config):\n self.num_invalid_programs += 1\n continue\n self.num_ran_programs += 1\n model, params = create_fake_model(prog_config)\n if quant:\n model, params = create_quant_model(model, params)\n\n feed_data = {}\n for name, tensor_config in prog_config.inputs.items():\n feed_data[name] = {\n 'data': tensor_config.data,\n 'lod': tensor_config.lod\n }\n\n logging.info('RUN program_config: ' + str(prog_config))\n self.num_predictor_kinds = 0\n for pred_config, op_list, (\n atol, rtol) in self.sample_predictor_configs(prog_config):\n self.num_predictor_kinds += 1\n\n # skip info\n ignore_flag = False\n for ignore_info in self.ignore_cases:\n if ignore_info[0](prog_config, pred_config):\n ignore_flag = True\n self.num_ignore_tests += 1\n if ignore_info[1] == IgnoreReasons.PASS_ACCURACY_ERROR:\n self.ignore_log(\"[PASS_ACCURACY_ERROR] \" +\n ignore_info[2] + ' ' + ' vs ' +\n self.inference_config_str(\n pred_config))\n else:\n raise NotImplementedError\n break\n\n if os.path.exists(self.cache_dir):\n shutil.rmtree(self.cache_dir)\n if not os.path.exists(self.cache_dir):\n os.mkdir(self.cache_dir)\n\n # baseline: no ir_optim run\n base_config = self.create_inference_config(\n ir_optim=False,\n use_gpu=pred_config.use_gpu(),\n use_mkldnn=pred_config.mkldnn_enabled(), )\n try:\n # baseline\n base_result = self.run_test_config(\n model, params, prog_config, base_config, feed_data)\n self.success_log('RUN_BASELINE ' +\n self.inference_config_str(\n base_config) + ' done')\n\n if os.path.exists(self.cache_dir):\n shutil.rmtree(self.cache_dir)\n\n pred_result = self.run_test_config(\n model, params, prog_config, pred_config, feed_data)\n self.assert_tensors_near(atol, rtol, pred_result,\n 
base_result)\n if not ignore_flag:\n self.assert_op_list(op_list)\n\n except Exception as e:\n self.fail_log(\n self.inference_config_str(pred_config) +\n '\\033[1;31m \\nERROR INFO: {}\\033[0m'.format(str(e)))\n if not ignore_flag:\n status = False\n continue\n self.success_log('RUN predictor_config ' + self.\n inference_config_str(pred_config) + ' done')\n\n status = self.check_op_version() and status\n self.assertTrue(status)\n\n def inference_config_str(self, config) -> str:\n dic = {}\n enable_mkldnn = config.mkldnn_enabled()\n dic['use_mkldnn'] = enable_mkldnn\n enable_gpu = config.use_gpu()\n dic['use_gpu'] = enable_gpu\n if not self.passes:\n dic['passes'] = self.passes\n\n enable_trt = config.tensorrt_engine_enabled()\n trt_precison = config.tensorrt_precision_mode()\n trt_dynamic_shape = config.tensorrt_dynamic_shape_enabled()\n if enable_trt:\n dic['use_trt'] = True\n dic['trt_precision'] = trt_precison\n dic['use_dynamic_shape'] = trt_dynamic_shape\n else:\n dic['use_trt'] = False\n return str(dic)\n\n def create_trt_inference_config(self) -> paddle_infer.Config:\n config = paddle_infer.Config()\n config.disable_glog_info()\n config.enable_use_gpu(100, 0)\n config.set_optim_cache_dir(self.cache_dir)\n config.switch_ir_debug()\n return config\n\n\nclass TrtLayerAutoScanTest(AutoScanTest):\n class TensorRTParam:\n '''\n TensorRT subgraph engine parameters. \n '''\n\n def __init__(self, workspace_size, max_batch_size, min_subgraph_size,\n precision, use_static, use_calib_mode):\n self.workspace_size = workspace_size\n self.max_batch_size = max_batch_size\n self.min_subgraph_size = min_subgraph_size\n self.precision = precision\n self.use_static = use_static\n self.use_calib_mode = use_calib_mode\n\n class DynamicShapeParam:\n '''\n Prepare TensorRT subgraph engine dynamic shape parameters. 
\n '''\n\n def __init__(self, min_input_shape, max_input_shape, opt_input_shape,\n disable_trt_plugin_fp16):\n self.min_input_shape = min_input_shape\n self.max_input_shape = max_input_shape\n self.opt_input_shape = opt_input_shape\n self.disable_trt_plugin_fp16 = disable_trt_plugin_fp16\n\n def __init__(self, *args, **kwargs):\n super(TrtLayerAutoScanTest, self).__init__(*args, **kwargs)\n self.trt_param = self.TensorRTParam(\n workspace_size=1024,\n max_batch_size=4,\n min_subgraph_size=0,\n precision=paddle_infer.PrecisionType.Float32,\n use_static=True,\n use_calib_mode=False)\n self.dynamic_shape = self.DynamicShapeParam({}, {}, {}, False)\n self.num_percent_cases = float(\n os.getenv(\n 'TEST_NUM_PERCENT_CASES', default='1.0'))\n # Choose different tests by week\n np.random.seed(int(time.strftime(\"%W\")))\n\n def create_inference_config(self, use_trt=True) -> paddle_infer.Config:\n config = paddle_infer.Config()\n config.disable_glog_info()\n config.enable_use_gpu(100, 0)\n config.set_optim_cache_dir(self.cache_dir)\n if use_trt:\n config.switch_ir_debug()\n config.enable_tensorrt_engine(\n max_batch_size=self.trt_param.max_batch_size,\n workspace_size=self.trt_param.workspace_size,\n min_subgraph_size=self.trt_param.min_subgraph_size,\n precision_mode=self.trt_param.precision,\n use_static=self.trt_param.use_static,\n use_calib_mode=self.trt_param.use_calib_mode)\n if len(self.dynamic_shape.min_input_shape\n ) != 0 and self.dynamic_shape.min_input_shape.keys(\n ) == self.dynamic_shape.max_input_shape.keys(\n ) and self.dynamic_shape.min_input_shape.keys(\n ) == self.dynamic_shape.opt_input_shape.keys():\n config.set_trt_dynamic_shape_info(\n self.dynamic_shape.min_input_shape,\n self.dynamic_shape.max_input_shape,\n self.dynamic_shape.opt_input_shape,\n self.dynamic_shape.disable_trt_plugin_fp16)\n return config\n\n def assert_op_size(self, trt_engine_num, paddle_op_num):\n last_passed_program = os.path.join(\n self.cache_dir, 'transpose_flatten_concat_fuse_pass.pdmodel')\n model_bytes = paddle.static.load_from_file(last_passed_program)\n pg = paddle.static.deserialize_program(model_bytes)\n main_block = pg.desc.block(0)\n op_size = main_block.op_size()\n op_types = [\n main_block.op(i).type() == 'tensorrt_engine' for i in range(op_size)\n ]\n trt_engine_size = sum(op_types)\n paddle_op_size = op_size - trt_engine_size\n self.assertTrue(trt_engine_size == trt_engine_num,\n 'trt_engine_num is {}, but got {}!'.format(\n trt_engine_size, trt_engine_num))\n self.assertTrue(paddle_op_size == paddle_op_num,\n 'paddle_op_num is {}, but got {}!'.format(\n paddle_op_size, paddle_op_num))\n\n def inference_config_str(self, config: paddle_infer.Config) -> str:\n dic = {}\n enable_trt = config.tensorrt_engine_enabled()\n trt_precison = config.tensorrt_precision_mode()\n trt_dynamic_shape = config.tensorrt_dynamic_shape_enabled()\n if enable_trt:\n dic['use_trt'] = True\n dic['trt_precision'] = trt_precison\n dic['use_dynamic_shape'] = trt_dynamic_shape\n else:\n dic['use_trt'] = False\n return str(dic)\n\n def run_test(self, quant=False, *args, **kwargs):\n status = True\n run_flags = []\n for prog_config in self.sample_program_configs(*args, **kwargs):\n # In CI, only run 10% cases\n if np.random.rand() < self.num_percent_cases:\n run_flags.append(True)\n else:\n run_flags.append(False)\n\n for prog_config, run_flags in zip(\n self.sample_program_configs(*args, **kwargs), run_flags):\n if not run_flags:\n continue\n\n # if program is invalid, we should skip that cases.\n if not 
self.is_program_valid(prog_config):\n continue\n\n model, params = create_fake_model(prog_config)\n if quant:\n model, params = create_quant_model(model, params)\n\n feed_data = {}\n for name, tensor_config in prog_config.inputs.items():\n feed_data[name] = {\n 'data': tensor_config.data,\n 'lod': tensor_config.lod\n }\n\n results: List[Dict[str, np.ndarray]] = []\n\n # baseline: gpu run\n logging.info('RUN program_config: ' + str(prog_config))\n gpu_config = self.create_inference_config(use_trt=False)\n results.append(\n self.run_test_config(model, params, prog_config, gpu_config,\n feed_data))\n self.success_log('RUN_GPU_BASELINE done')\n\n for pred_config, nodes_num, threshold in self.sample_predictor_configs(\n prog_config):\n\n if os.path.exists(self.cache_dir):\n shutil.rmtree(self.cache_dir)\n\n if isinstance(threshold, float):\n atol = threshold\n rtol = 1e-8\n elif isinstance(threshold, list) or isinstance(threshold,\n tuple):\n atol = threshold[0]\n rtol = threshold[1]\n else:\n raise NotImplementedError\n\n if quant and pred_config.tensorrt_precision_mode(\n ) != paddle_infer.PrecisionType.Int8:\n continue\n if pred_config.tensorrt_precision_mode(\n ) == paddle_infer.PrecisionType.Int8 and not quant:\n continue\n\n ignore_flag = False\n for ignore_info in self.ignore_cases:\n if ignore_info[0](prog_config, pred_config):\n ignore_flag = True\n if ignore_info[1] == IgnoreReasons.TRT_NOT_IMPLEMENTED:\n self.ignore_log(\"[TRT_NOT_IMPLEMENTED] \" +\n ignore_info[2] + ' ' + ' vs ' +\n self.inference_config_str(\n pred_config))\n elif ignore_info[1] == IgnoreReasons.TRT_NOT_SUPPORT:\n self.ignore_log(\"[TRT_NOT_SUPPORT] \" + ignore_info[\n 2] + ' ' + ' vs ' + self.inference_config_str(\n pred_config))\n else:\n raise NotImplementedError\n break\n\n try:\n pred_config_deserialize = paddle_infer.Config(pred_config)\n results.append(\n self.run_test_config(model, params, prog_config,\n pred_config, feed_data))\n self.assert_tensors_near(atol, rtol, results[-1],\n results[0])\n if not ignore_flag:\n self.assert_op_size(nodes_num[0], nodes_num[1])\n # deserialize test\n if nodes_num[0] > 0:\n self.run_test_config(model, params, prog_config,\n pred_config_deserialize, feed_data)\n except Exception as e:\n self.fail_log(\n str(prog_config) + ' vs ' + self.inference_config_str(\n pred_config) +\n '\\033[1;31m \\nERROR INFO: {}\\033[0m'.format(str(e)))\n if not ignore_flag:\n status = False\n continue\n self.success_log('RUN predictor_config ' + self.\n inference_config_str(pred_config) + ' done')\n\n self.assertTrue(status)\n\n # TODO(wilber): just for backward compatible\n def add_skip_case(\n self,\n teller: [Callable[[ProgramConfig, paddle_infer.Config], bool]],\n reason: IgnoreReasons,\n note: str):\n self.ignore_cases.append((teller, reason, note))\n"
] |
[
[
"numpy.random.rand",
"numpy.amax",
"numpy.allclose",
"numpy.random.seed"
]
] |
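The Paddle auto-scan test above drives everything through hypothesis settings profiles chosen by environment variables. A stripped-down sketch of that profile mechanism, with a hypothetical placeholder test in place of the generated program configs:

```python
import os
from hypothesis import given, settings, strategies as st

# Same two profile names as auto_scan_test.py; only the limits relevant here are kept.
settings.register_profile("ci", max_examples=100, deadline=None, derandomize=True)
settings.register_profile("dev", max_examples=1000, deadline=None, derandomize=True)

# Profile selection mirrors the HYPOTHESIS_TEST_PROFILE logic in the file.
settings.load_profile("ci" if os.getenv('HYPOTHESIS_TEST_PROFILE', 'dev') == 'ci' else "dev")

@given(st.integers(min_value=0, max_value=10))
def test_placeholder(x):
    # hypothetical stand-in for running one generated program config
    assert 0 <= x <= 10

test_placeholder()   # calling a @given-wrapped function runs max_examples cases
```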
rsdefever/mosdef_slitpore
|
[
"2150d1d0e062bf6aac660be8ae73d94e2a3c4438",
"2150d1d0e062bf6aac660be8ae73d94e2a3c4438"
] |
[
"simulations/bulk-water/gomc/analysis/Data_analysis.py",
"mosdef_slitpore/analysis.py"
] |
[
"import numpy as np\nimport csv as csv\nimport pandas as pd\n\n#check all thermal expansion errors and cp errors before providing data\n\nStep_start = 50 *10**6\nStep_finish = 100 *10**6\nTemp_K =298\n\n\nSet_List = ['set1', 'set2', 'set3', 'set4', 'set5']\nChemPot = [-6295, -5573, -4852, -4491, -4401, -4130, -4040, -3950]\nrun_No = [ '1r1', '1r2', '1r3', '1r4', '1r5', '1r6', '1r7', '1r8']\nVolume_box_Ang_cubed = [5000**3, 2000**3, 1000**3, 600**3, 600**3, 400**3, 400**3, 300**3]\nColumn_no_Step = 0 # must be iteration value\nColumn_no_Pressure = 10 # column for PRESSURE\nColumn_Total_Molecules = 11 # column for TOT_MOL\nColumn_Density = 12 # column for TOT_DENS\n\nChemical_being_analyzed = 'water'\n\n\n#*******************************************************************************************************************\n# need to insert blocking_std_dev method to calculate accuated Std. Deviations to get accurate results\n#*******************************************************************************************************************\n\nPsat_data_file = '../../../Psat_SPCE_298K/gomc/analysis/SPCE_Pvap_at_298K_df.csv'\nPsat_data = pd.read_csv(Psat_data_file, sep=',', header=0, na_values='NaN',\n\t\t\t\t\t\tusecols=[0,1], index_col=False)\n\nPsat_data = pd.DataFrame(Psat_data)\nprint(Psat_data)\n\nPsat_SPEC_data_bar = Psat_data.loc[:,'Avg_Pvap_bar']\nPsat_SPEC_data_bar = list(Psat_SPEC_data_bar)[0]\nprint('Psat_SPEC_data_bar = '+str(Psat_SPEC_data_bar))\nPsat_at_298K = Psat_SPEC_data_bar\n\nTemp_List = []\nPressure_Box_0_List = []\nTotal_Molecules_Box_0_List = []\nDensity_Box_0_List = []\nP_from_IG_Density_Box_0_List =[]\nPressure_Box_1_List = []\nTotal_Molecules_Box_1_List = []\nDensity_Box_1_List = []\n\n\nPressure_Box_0_Final_Mean_List = []\nPressure_Box_0_Final_StdDev_List = []\nTotal_Molecules_Box_0_Final_Mean_List = []\nTotal_Molecules_Box_0_Final_StdDev_List = []\nDensity_Box_0_Final_Mean_List = []\nDensity_Box_0_Final_StdDev_List = []\nP_from_IG_Density_Box_0_Final_Mean_List =[]\nP_from_IG_Density_Box_0_Final_StdDev_List =[]\nPressure_Box_1_Final_Mean_List = []\nPressure_Box_1_Final_StdDev_List = []\nTotal_Molecules_Box_1_Final_Mean_List = []\nTotal_Molecules_Box_1_Final_StdDev_List = []\nDensity_Box_1_Final_StdDev_Final_Mean_List = []\nDensity_Box_1_Final_StdDev_List = []\n\n\nDensity_Avg_Box_0_and_1_List = []\n\nBox_data_save_file_name = 'Avg_data_from Box_'\n# ***********************\n# calc the avg data from the liq and vap boxes (start)\n# ***********************\n\n\n\n\nfor j in range(len(run_No)):\n\trun_No_run_file_file1 = j\n\trun_No_iteration = run_No[j]\n\n\tPressure_Box_0_iteration_List = []\n\tTotal_Molecules_Box_0_iteration_List = []\n\tDensity_Box_0_iteration_List = []\n\tP_from_IG_Density_Box_0_iteration_List = []\n\tPressure_Box_0_iteration_0_List = []\n\n\tfor n in range(0, len(Set_List)):\n\t\tset_iteration = Set_List[n]\n\n\n\n\n\t\tfirst_part_run_path = '../'+str(set_iteration)\n\n\t\treading_file_Box_0 = first_part_run_path+'/'+str(run_No[j])+'/'+'Blk_SPCE_Pvap_BOX_0.dat'\n\n\n\n\n\t\t#*******************************************************************************************************\n\t\t#end major variables\n\t\t# *******************************************************************************************************\n\t\tColumn_Psat_ratio_Title = 'Psat_ratio' #\n\t\tColumn_ChemPot_Title = 'ChemPot_K' # column title Title for PRESSURE\n\t\tColumn_no_Step_Title = 'Step' # column title Title for iteration value\n\t\tColumn_no_Pressure_Title = 'P_bar' # 
column title Title for PRESSURE\n\t\tColumn_no_Pressure_from_IG_density = 'P_from_IG_Density_bar'\n\t\tColumn_Total_Molecules_Title = \"No_mol\" # column title Title for TOT_MOL\n\t\tColumn_Density_Title = 'Density_kg_per_mcubed' # column title Title for TOT_DENS\n\n\n\t\tExtracted_Data_file_Titles = [Column_no_Step_Title, Column_no_Pressure_Title,\n\t\t\t\t\t\t\t\t\t Column_Total_Molecules_Title, Column_Density_Title]\n\n\n\n\n\t\t#Programmed data\n\t\tStep_start_string = str(int(Step_start))\n\t\tStep_finish_string = str(int(Step_finish))\n\n\t\t#*************************\n\t\t#drawing in data from single file and extracting specific rows for the liquid box (start)\n\t\t# *************************\n\t\tdata_Box_0_iteration = pd.read_csv(reading_file_Box_0, names=Extracted_Data_file_Titles, sep='\\s+', header=1, na_values='NaN',\n\t\t\t\t\t\t\tusecols=[Column_no_Step,Column_no_Pressure, Column_Total_Molecules,\n\t\t\t\t\t\t\t\t\t Column_Density], index_col=False)\n\n\t\tdata_Box_0_iteration = pd.DataFrame(data_Box_0_iteration)\n\t\tdata_Box_0_iteration = data_Box_0_iteration.query(Step_start_string +' <= ' + Column_no_Step_Title + ' <= ' + Step_finish_string)\n\t\t##print('Liquid data')\n\t\t#print(data_Box_0)\n\n\t\tNo_Box_0_iteration = data_Box_0_iteration.loc[:,Column_no_Step_Title]\n\t\tNo_Box_0_iteration = list(No_Box_0_iteration)\n\t\tNo_Box_0_iteration = np.transpose(No_Box_0_iteration)\n\n\t\tPressure_Box_0_iteration = data_Box_0_iteration.loc[:,Column_no_Pressure_Title]\n\t\tPressure_Box_0_iteration = list(Pressure_Box_0_iteration)\n\t\tPressure_Box_0_iteration = np.transpose(Pressure_Box_0_iteration)\n\t\tPressure_Box_0_iteration_Mean = np.nanmean(Pressure_Box_0_iteration)\n\n\t\tTotal_Molecules_Box_0_iteration = data_Box_0_iteration.loc[:,Column_Total_Molecules_Title]\n\t\tTotal_Molecules_Box_0_iteration = list(Total_Molecules_Box_0_iteration)\n\t\tTotal_Molecules_Box_0_iteration = np.transpose(Total_Molecules_Box_0_iteration)\n\t\tTotal_Molecules_Box_0_iteration_Mean = np.nanmean(Total_Molecules_Box_0_iteration)\n\n\t\tDensity_Box_0_iteration = data_Box_0_iteration.loc[:,Column_Density_Title]\n\t\tDensity_Box_0_iteration = list(Density_Box_0_iteration)\n\t\tDensity_Box_0_iteration = np.transpose(Density_Box_0_iteration)\n\t\tDensity_Box_0_iteration_Mean = np.nanmean(Density_Box_0_iteration)\n\n\n\t\t#*************************\n\t\t#drawing in data from single file and extracting specific rows for the liquid box (end)\n\t\t# *************************\n\n\n\t\t#*************************\n\t\t#drawing in data from single file and extracting specific rows for the vapor box (start)\n\t\t# *************************\n\n\n\t\tChemPot_List=ChemPot\n\n\t\tPressure_Box_0_iteration_List.append(Pressure_Box_0_iteration_Mean)\n\t\tTotal_Molecules_Box_0_iteration_List.append(Total_Molecules_Box_0_iteration_Mean)\n\t\tDensity_Box_0_iteration_List.append(Density_Box_0_iteration_Mean)\n\n\t\tR_ang_cubed_bar_per_K_mol = 8.31446261815324E+025\n\t\tAvagadro_No = 6.022*10**23\n\n\n\n\tPressure_Box_0_List.append(np.mean(Pressure_Box_0_iteration_List))\n\tTotal_Molecules_Box_0_List.append(np.mean(Total_Molecules_Box_0_iteration_List))\n\tDensity_Box_0_List.append(np.mean(Density_Box_0_iteration_List))\n\n\nfor i in range(0, len(Pressure_Box_0_List)):\n\tP_from_IG_Density_Box_0_List.append(\n\t\tR_ang_cubed_bar_per_K_mol * Total_Molecules_Box_0_List[i] * Temp_K / (Avagadro_No * Volume_box_Ang_cubed[i]))\n\n\nratio_of_Psat_ratio = [i/Psat_at_298K for i in 
Pressure_Box_0_List]\n\n\n\n\t#*************************\n\t#drawing in data from single file and extracting specific rows for the vapor box (end)\n\t# *************************\nprint('ratio_of_Psat_ratio = ' + str(ratio_of_Psat_ratio))\nprint('ChemPot_List = '+str(ChemPot_List))\nprint('Pressure_Box_0_List = '+str(Pressure_Box_0_List))\nprint('Total_Molecules_Box_0_List = '+str(Total_Molecules_Box_0_List))\nprint('Density_Box_0_List = '+str(Density_Box_0_List))\n\n\nBox_0_data_dataframe =pd.DataFrame(np.column_stack([ratio_of_Psat_ratio,\n\t\t\t\t\t\t\t\t\t\t\t\t\tChemPot_List,\n\t\t\t\t\t\t\t\t\t\t\t\t\tPressure_Box_0_List,\n\t\t\t\t\t\t\t\t\t\t\t\t\tP_from_IG_Density_Box_0_List ,\n\t\t\t\t\t\t\t\t\t\t\t\t\tTotal_Molecules_Box_0_List,\n\t\t\t\t\t\t\t\t\t\t\t\t\tDensity_Box_0_List]))\n\n\nBox_0_data_dataframe.to_csv(Box_data_save_file_name + '0_'+str(Chemical_being_analyzed)+'_df.txt', sep=\"\t\",\n\t\t\t\t\t\t\theader=[Column_Psat_ratio_Title, Column_ChemPot_Title, Column_no_Pressure_Title,\n\t\t\t\t\t\t\t\t\tColumn_no_Pressure_from_IG_density,\n\t\t\t\t\t\t\t\t\tColumn_Total_Molecules_Title, Column_Density_Title])\nBox_0_data_dataframe.to_csv(Box_data_save_file_name + '0_'+str(Chemical_being_analyzed)+'_df.csv', sep=\",\",\n\t\t\t\t\t\t\theader=[Column_Psat_ratio_Title, Column_ChemPot_Title, Column_no_Pressure_Title,\n\t\t\t\t\t\t\t\t Column_no_Pressure_from_IG_density,\n\t\t\t\t\t\t\t\t Column_Total_Molecules_Title, Column_Density_Title])\n\n",
"import numpy as np\n\n\ndef compute_density(\n traj,\n area,\n surface_normal_dim=2,\n pore_center=0.0,\n max_distance=1.0,\n bin_width=0.01,\n symmetrize=False,\n):\n \"\"\"Compute the density of traj in atoms/nm^3\n\n Parameters\n ----------\n traj : mdtraj.Trajectory,\n trajectory to analyze\n area : float\n area of the surface in nm^2\n surface_normal_dim : enum (0,1,2), optional, default = 2\n direction normal to the surface (x:0, y:1, z:2)\n pore_center : float, optional, default = 0.0\n coordinate of the pore center along surface_normal_dim\n max_distance : float, optional, default = 1.0\n max distance to consider from the center of the pore\n bin_width : float, optional, default = 0.01\n width of the bin for computing s\n symmetrize : bool, optional, default = False\n if binning should be done in abs(z) instead of z\n Returns\n -------\n bin_centers : np.ndarray\n the bin centers, shifted so that pore_center is at 0.0\n density : np.ndarray\n the density (atoms / nm^3) in each bin\n \"\"\"\n if symmetrize:\n distances = abs(traj.xyz[:, :, surface_normal_dim] - pore_center)\n else:\n distances = traj.xyz[:, :, surface_normal_dim] - pore_center\n bin_centers = []\n density = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < bin_center + 0.5 * bin_width,\n )\n bin_centers.append(bin_center)\n if symmetrize:\n if np.isclose(bin_center, 0):\n density.append(mask.sum() / (area * 1 * bin_width * traj.n_frames))\n else:\n density.append(mask.sum() / (area * 2 * bin_width * traj.n_frames))\n else:\n density.append(mask.sum() / (area * bin_width * traj.n_frames))\n\n return bin_centers, density\n\n\ndef compute_s(\n traj,\n surface_normal_dim=2,\n pore_center=0.0,\n max_distance=1.0,\n bin_width=0.01,\n bond_array=None,\n symmetrize=False,\n):\n\n \"\"\"Compute the \"s\" order parameter\n\n Parameters\n ----------\n traj : mdtraj.Trajectory,\n trajectory to analyze\n surface_normal_dim : enum (0,1,2), optional, default = 2\n direction normal to the surface (x:0, y:1, z:2)\n pore_center : float, optional, default = 0.0\n coordinate of the pore center along surface_normal_dim\n max_distance : float, optional, default = 1.0\n max distance to consider from the center of the pore\n bin_width : float, optional, default = 0.01\n width of the bin for computing\n bond_array : np.array(dtype=np.int32), optional, default = None\n Array of bonds to pass into `make_molecules_whole`\n Warning: This argument is necessary if loading in a mol2 file due to a\n current bug in the MDTraj MOL2 reader: https://github.com/mdtraj/mdtraj/issues/1581\n symmetrize : bool, optional, default = False\n if binning should be done in abs(z) instead of z\n\n Returns\n -------\n bin_centers : np.ndarray\n the bin centers, shifted so that pore_center is at 0.0\n s_values : np.ndarray\n the value of s for each bin\n \"\"\"\n # Make molecules whole first\n traj.make_molecules_whole(inplace=True, sorted_bonds=bond_array)\n # Select ow and hw\n water_o = traj.top.select(\"water and name O\")\n water_h = traj.top.select(\"water and name H\")\n traj_ow = traj.atom_slice(water_o)\n traj_hw = traj.atom_slice(water_h)\n\n # Compute angles between surface normal ([0,0,1]) and h-o-h bisector\n hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames, -1, 2, 3).mean(axis=2)\n vectors = traj_ow.xyz - hw_midpoints\n vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)\n cos_angles = vectors[:, :, surface_normal_dim]\n\n # Compute 
distances -- center of pore already @ 0,0; use OW position\n if symmetrize:\n distances = abs(traj_ow.xyz[:, :, surface_normal_dim] - pore_center)\n else:\n distances = traj_ow.xyz[:, :, surface_normal_dim] - pore_center\n bin_centers = []\n s_values = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < bin_center + 0.5 * bin_width,\n )\n s = (3.0 * np.mean(cos_angles[mask] ** 2) - 1.0) / 2.0\n bin_centers.append(bin_center)\n s_values.append(s)\n\n return bin_centers, s_values\n\n\ndef compute_mol_per_area(\n traj, area, dim, box_range, n_bins, shift=True, frame_range=None\n):\n \"\"\"\n Calculate molecules per area\n Parameters\n ----------\n traj : mdtraj.trajectory\n Trajectory\n area : int or float\n Area of box in dimensions where number density isn't calculated\n dim : int\n Dimension to calculate number density profile (x: 0, y: 1, z: 2)\n box_range : array\n Range of coordinates in 'dim' to evaluate\n n_bins : int\n Number of bins in histogram\n shift : boolean, default=True\n Shift center to zero if True\n frame_range : Python range() (optional)\n Range of frames to calculate number density function over\n\n Returns\n -------\n areas : list\n A list containing number density for each bin\n new_bins : list\n A list of bins\n \"\"\"\n water_o = traj.atom_slice(traj.topology.select(\"name O\"))\n resnames = np.unique([x.name for x in water_o.topology.residues])\n\n if frame_range:\n water_o = water_o[frame_range]\n for i, frame in enumerate(water_o):\n indices = [\n [atom.index for atom in compound.atoms]\n for compound in list(frame.topology.residues)\n ]\n\n if frame_range:\n if i == 0:\n x = np.histogram(\n frame.xyz[0, indices, dim].flatten(),\n bins=n_bins,\n range=(box_range[0], box_range[1]),\n )\n areas = x[0]\n bins = x[1]\n else:\n areas += np.histogram(\n frame.xyz[0, indices, dim].flatten(),\n bins=n_bins,\n range=(box_range[0], box_range[1]),\n )[0]\n else:\n if i == 0:\n x = np.histogram(\n frame.xyz[0, indices, dim].flatten(),\n bins=n_bins,\n range=(box_range[0], box_range[1]),\n )\n areas = x[0]\n bins = x[1]\n else:\n areas += np.histogram(\n frame.xyz[0, indices, dim].flatten(),\n bins=n_bins,\n range=(box_range[0], box_range[1]),\n )[0]\n\n areas = np.divide(areas, water_o.n_frames)\n\n new_bins = list()\n for idx, bi in enumerate(bins):\n if (idx + 1) >= len(bins):\n continue\n mid = (bins[idx] + bins[idx + 1]) / 2\n new_bins.append(mid)\n\n if shift:\n middle = float(n_bins / 2)\n if middle % 2 != 0:\n shift_value = new_bins[int(middle - 0.5)]\n else:\n shift_value = new_bins[int(middle)]\n new_bins = [(bi - shift_value) for bi in new_bins]\n\n return (areas, new_bins)\n\n\ndef compute_angle(\n traj,\n surface_normal_dim=2,\n pore_center=0.0,\n max_distance=1.0,\n bin_width=0.01,\n symmetrize=False,\n bond_array=None,\n):\n \"\"\"Compute the cos(angle) between HOH bisector and graphene surface normal\n\n Parameters\n ----------\n traj : mdtraj.Trajectory,\n trajectory to analyze\n surface_normal_dim : enum (0,1,2), optional, default = 2\n direction normal to the surface (x:0, y:1, z:2)\n pore_center : float, optional, default = 0.0\n coordinate of the pore center along surface_normal_dim\n max_distance : float, optional, default = 1.0\n max distance to consider from the center of the pore\n bin_width : float, optional, default = 0.01\n width of the bin for computing s\n symmetrize : bool, optional, default = False\n if binning should be done in abs(z) 
instead of z\n bond_array : np.array(dtype=np.int32), optional, default = None\n Array of bonds to pass into `make_molecules_whole`\n Warning: This argument is necessary if loading in a mol2 file due to a\n current bug in the MDTraj MOL2 reader: https://github.com/mdtraj/mdtraj/issues/1581\n Returns\n -------\n bin_centers : np.ndarray\n the bin centers, shifted so that pore_center is at 0.0\n cos_angle_values : np.ndarray\n the value of average cos(angle) for each bin\n cos_angles: np.ndarray\n array that contains all the samples for cos(angle)\n \"\"\"\n # Make molecules whole first\n traj.make_molecules_whole(inplace=True, sorted_bonds=bond_array)\n # Select ow and hw\n water_o = traj.top.select(\"water and name O\")\n water_h = traj.top.select(\"water and name H\")\n traj_ow = traj.atom_slice(water_o)\n traj_hw = traj.atom_slice(water_h)\n\n # Compute angles between surface normal ([0,0,1]/[0,0,-1]) and h-o-h bisector\n hw_midpoints = traj_hw.xyz.reshape(traj_hw.n_frames, -1, 2, 3).mean(axis=2)\n\n vectors = traj_ow.xyz - hw_midpoints\n vectors /= np.linalg.norm(vectors, axis=-1, keepdims=True)\n cos_angles = vectors[:, :, surface_normal_dim]\n # The surface normal is decided by looking at the position of O in H2O\n side_of_pore = np.sign(-traj_ow.xyz[:, :, surface_normal_dim] + pore_center)\n cos_angles = np.multiply(cos_angles, side_of_pore)\n # Compute distances -- center of pore already @ 0,0; use OW position\n if symmetrize:\n distances = abs(traj_ow.xyz[:, :, surface_normal_dim] - pore_center)\n else:\n distances = traj_ow.xyz[:, :, surface_normal_dim] - pore_center\n bin_centers = []\n cos_angle_values = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < bin_center + 0.5 * bin_width,\n )\n cos_angle = np.mean(cos_angles[mask])\n bin_centers.append(bin_center)\n cos_angle_values.append(cos_angle)\n\n return bin_centers, cos_angle_values, cos_angles\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.mean",
"numpy.nanmean",
"numpy.transpose",
"numpy.column_stack"
],
[
"numpy.multiply",
"numpy.unique",
"numpy.arange",
"numpy.linalg.norm",
"numpy.sign",
"numpy.mean",
"numpy.logical_and",
"numpy.divide",
"numpy.isclose"
]
] |
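`compute_density` in mosdef_slitpore/analysis.py bins atom positions along the surface normal with boolean masks. The same mask-and-bin loop, reduced to a self-contained sketch over a hypothetical 1-D coordinate array instead of an mdtraj trajectory:

```python
import numpy as np

rng = np.random.default_rng(0)
coords = rng.uniform(-1.0, 1.0, size=5000)   # stand-in for traj.xyz[:, :, surface_normal_dim]
area, bin_width, n_frames = 2.0, 0.01, 1     # hypothetical surface area (nm^2) and bin width

bin_centers, density = [], []
for center in np.arange(-1.0, 1.0, bin_width):
    mask = np.logical_and(coords > center - 0.5 * bin_width,
                          coords < center + 0.5 * bin_width)
    bin_centers.append(center)
    density.append(mask.sum() / (area * bin_width * n_frames))

print(len(bin_centers), max(density))
```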
syforcee/GUPB
|
[
"f916acf94efe61c54fa7b4cc33d3f94821fdb3d7"
] |
[
"gupb/controller/krowa123/big_brains/PER.py"
] |
[
"import numpy as np\n\n\nclass SumTree(object):\n data_pointer = 0\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.tree = np.zeros(2 * capacity - 1)\n self.data = np.zeros(capacity, dtype=object)\n\n def add(self, priority, data):\n tree_index = self.data_pointer + self.capacity - 1\n self.data[self.data_pointer] = data\n self.update(tree_index, priority)\n self.data_pointer += 1\n if self.data_pointer >= self.capacity:\n self.data_pointer = 0\n\n def update(self, tree_index, priority):\n change = priority - self.tree[tree_index]\n self.tree[tree_index] = priority\n while tree_index != 0:\n tree_index = (tree_index - 1) // 2\n self.tree[tree_index] += change\n\n def get_leaf(self, v):\n parent_index = 0\n while True:\n left_child_index = 2 * parent_index + 1\n right_child_index = left_child_index + 1\n\n if left_child_index >= len(self.tree):\n leaf_index = parent_index\n break\n else:\n if v <= self.tree[left_child_index]:\n parent_index = left_child_index\n else:\n v -= self.tree[left_child_index]\n parent_index = right_child_index\n\n data_index = leaf_index - self.capacity + 1\n\n return leaf_index, self.tree[leaf_index], self.data[data_index]\n\n @property\n def total_priority(self):\n return self.tree[0]\n\n\nclass Memory(object):\n PER_e = 0.01\n PER_a = 0.6\n PER_b = 0.4\n\n PER_b_increment_per_sampling = 0.001\n\n absolute_error_upper = 1.\n\n def __init__(self, capacity):\n self.tree = SumTree(capacity)\n\n def store(self, experience):\n max_priority = np.max(self.tree.tree[-self.tree.capacity:])\n\n if max_priority == 0:\n max_priority = self.absolute_error_upper\n\n self.tree.add(max_priority, experience)\n\n def sample(self, n):\n minibatch = []\n\n b_idx = np.empty((n,), dtype=np.int32)\n\n priority_segment = self.tree.total_priority / n # priority segment\n\n for i in range(n):\n a, b = priority_segment * i, priority_segment * (i + 1)\n value = np.random.uniform(a, b)\n\n index, priority, data = self.tree.get_leaf(value)\n\n b_idx[i] = index\n\n minibatch.append([data[0], data[1], data[2], data[3], data[4]])\n\n return b_idx, minibatch\n\n def batch_update(self, tree_idx, abs_errors):\n abs_errors += self.PER_e\n clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)\n ps = np.power(clipped_errors, self.PER_a)\n\n for ti, p in zip(tree_idx, ps):\n self.tree.update(ti, p)\n"
] |
[
[
"numpy.minimum",
"numpy.power",
"numpy.max",
"numpy.random.uniform",
"numpy.zeros",
"numpy.empty"
]
] |
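A hypothetical usage sketch for the `SumTree`/`Memory` prioritized-replay classes above, assuming the module path from the row's file_path is importable; the transition tuples and TD errors are placeholders:

```python
import numpy as np
from gupb.controller.krowa123.big_brains.PER import Memory  # assumes the package is on the path

memory = Memory(capacity=1024)
for step in range(64):
    transition = (step, 0, 0.0, step + 1, False)   # placeholder (s, a, r, s', done) tuple
    memory.store(transition)                       # new experiences get the current max priority

tree_idx, minibatch = memory.sample(8)             # proportional sampling through the SumTree
abs_td_errors = np.random.rand(8)                  # stand-in for |TD error| of the sampled batch
memory.batch_update(tree_idx, abs_td_errors)       # write the updated priorities back
```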
yt7589/wdd
|
[
"378c4348cf64c88c72623b88a87e751270e75386"
] |
[
"apps/kwm/kwm_app.py"
] |
[
"#\nimport numpy as np\nfrom apps.kwm.ds.kwm_ds import KwmDs\nfrom apps.kwm.xgb_engine import XgbEngine\n\nclass KwmApp(object):\n WORK_RAW_DATA = 1\n WORK_TXT_DATA =2\n WORK_CSV_DATA = 3\n\n def __init__(self):\n self.refl = 'apps.kwm.KwmApp'\n\n def startup(self, args={}):\n print('晶圆缺陷检测应用WDD v0.0.2')\n mode = KwmApp.WORK_CSV_DATA\n ds = KwmDs()\n if KwmApp.WORK_RAW_DATA == mode:\n X, y = ds.generate_ds()\n print('保存数据集到文本文件:')\n np.savetxt('./datas/kwm_x.txt', X, fmt='%.18e', delimiter=',', newline='\\n', encoding='utf-8')\n np.savetxt('./datas/kwm_y.txt', y, fmt='%d', delimiter=',', newline='\\n', encoding='utf-8')\n elif KwmApp.WORK_TXT_DATA == mode:\n X, y = ds.load_processed_ds()\n print('X: type={0}; shape={1};'.format(type(X), X.shape))\n print('y: type={0}; shape={1};'.format(type(y), y.shape))\n clss = np.unique(y)\n print('classes:{0};'.format(clss))\n # self.save_csv(X, y) # 为XGBoost算法准备数据集\n elif KwmApp.WORK_CSV_DATA == mode:\n self.run_xgboost()\n print('^_^ The End ^_^')\n\n def run_xgboost(self):\n print('XGBoost算法应用')\n xgbe = XgbEngine('./datas/kwm_xgb.csv', 59, 8)\n xgbe.startup(XgbEngine.MODE_TRAIN)\n\n def save_csv(self, X, y):\n y = y.reshape(y.shape[0], 1)\n data = np.concatenate((X, y), axis=1)\n print('data: {0};'.format(data.shape))\n #np.savetxt('./datas/kwm_xgb.csv', data, delimiter=',', newline='\\n', encoding='utf-8')\n with open('./datas/kwm_xgb.csv', 'w', encoding='utf-8') as fd:\n for row in data:\n for idx in range(0, 59):\n fd.write('{0},'.format(row[idx]))\n fd.write('{0}\\n'.format(int(row[59])))\n"
] |
[
[
"numpy.concatenate",
"numpy.unique",
"numpy.savetxt"
]
] |
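kwm_app.py above saves the feature matrix and labels with `np.savetxt` and later stacks them into one array for the XGBoost CSV. A minimal illustration of those calls with stand-in data:

```python
import numpy as np

X = np.random.rand(4, 59)      # stand-in feature matrix (59 feature columns, as in save_csv)
y = np.array([0, 1, 7, 3])     # stand-in class labels

np.savetxt('kwm_x.txt', X, fmt='%.18e', delimiter=',', newline='\n', encoding='utf-8')
np.savetxt('kwm_y.txt', y, fmt='%d', delimiter=',', newline='\n', encoding='utf-8')

data = np.concatenate((X, y.reshape(y.shape[0], 1)), axis=1)   # features plus a label column
print(np.unique(y), data.shape)
```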
xinshi-chen/MultiDAG
|
[
"0b041eac793c31b1acc2e737a6c46579e362e117"
] |
[
"multidag/common/consts.py"
] |
[
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom matplotlib import cm\nimport torch.optim as optim\n\n\nimport argparse\nimport logging\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nt_float = torch.float32\nnp_float = np.float32\nstr_float = \"float32\"\n\nopts = argparse.ArgumentParser(description='gpu option')\nopts.add_argument('-gpu', type=int, default=-1, help='-1: cpu; 0 - ?: specific gpu index')\n\nargs, _ = opts.parse_known_args()\nif torch.cuda.is_available() and args.gpu >= 0:\n DEVICE = torch.device('cuda:' + str(args.gpu))\n print('use gpu indexed: %d' % args.gpu)\nelse:\n DEVICE = torch.device('cpu')\n print('use cpu')\n\n\nclass Lambda(nn.Module):\n\n def __init__(self, f):\n super(Lambda, self).__init__()\n self.f = f\n\n def forward(self, x):\n return self.f(x)\n\n\nclass Swish(nn.Module):\n\n def __init__(self):\n super(Swish, self).__init__()\n self.beta = nn.Parameter(torch.tensor(1.0))\n\n def forward(self, x):\n return x * torch.sigmoid(self.beta * x)\n\n\nNONLINEARITIES = {\n \"tanh\": nn.Tanh(),\n \"relu\": nn.ReLU(),\n \"softplus\": nn.Softplus(),\n \"softmax\": nn.Softmax(dim=-1),\n \"sigmoid\": nn.Sigmoid(),\n \"elu\": nn.ELU(),\n \"swish\": Swish(),\n \"square\": Lambda(lambda x: x**2),\n \"identity\": Lambda(lambda x: x),\n}\n\nOPTIMIZER = {\n 'adam': optim.Adam,\n 'sgd': optim.SGD,\n 'adagrad': optim.Adagrad\n}\n\n\ncolors = dict()\ncolors['gd'] = cm.Paired(5)\ncolors['nag'] = cm.Paired(1)\ncolors['mlp_rnn'] = cm.Paired(3)\n"
] |
[
[
"torch.nn.Softmax",
"torch.sigmoid",
"torch.nn.Softplus",
"matplotlib.cm.Paired",
"torch.nn.ELU",
"torch.nn.Tanh",
"torch.nn.Sigmoid",
"torch.tensor",
"torch.cuda.is_available",
"torch.device",
"torch.nn.ReLU"
]
] |
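consts.py above exposes `DEVICE`, a `NONLINEARITIES` lookup table (including the learnable-beta `Swish`), and an `OPTIMIZER` map. A short, hypothetical consumer of those constants, assuming the module path shown in the row:

```python
import torch
from multidag.common.consts import DEVICE, NONLINEARITIES, OPTIMIZER  # assumed module path

x = torch.randn(8, 4, device=DEVICE)
act = NONLINEARITIES['swish']                        # Swish module with its learnable beta
opt = OPTIMIZER['adam'](act.parameters(), lr=1e-3)   # optimizer class looked up by name
print(act(x).shape, DEVICE)
```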
jonhilgart22/glavanize-projects
|
[
"39fd6aea1b42975a5cf7c3b15512c732ea12d60f"
] |
[
"Natural_Language_Processing/code/sentence_generator.py"
] |
[
"__author__ = 'Jonathan Hilgart'\n#encoding utf-8\nfrom textblob import TextBlob\nfrom collections import Counter\nfrom collections import defaultdict\nimport numpy as np\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport nltk\nimport re\n\n# cond_prob_bigram / cond_prob_trigram code inspired by Brian Spiering\n\nclass SentenceGenerator():\n\t\"\"\"Probabilistic language generation using a trigram model.\"\"\"\n\n\tdef __init__(self,story_text):\n\t\tblob_text = TextBlob(story_text)\n\t\ttext_tokens = blob_text.tokens\n\t\tself.text_counter_unigram = Counter(text_tokens)\n\t\tself.text_counter_bigram= Counter(nltk.bigrams(text_tokens))\n\t\tself.text_counter_trigram= Counter(nltk.trigrams(text_tokens))\n\t\t#Probability distributions for different text combinations\n\t\t#This takes ~10 minutes to run\n\t\tself.prob_dist_unigram = self.prob_distribution(self.text_counter_unigram)\n\t\tself.prob_dist_bigram = self.prob_distribution(self.text_counter_bigram)\n\t\tself.prob_dist_trigram = self.prob_distribution(self.text_counter_trigram)\n\tdef prob_distribution(self,counter):\n\t \"\"\"Return the probability based upon a counter dictionary\"\"\"\n\t return {k:value/sum(counter.values()) for k,value in counter.items()}\n\tdef cond_prob_bigram(self,end_word, first_word):\n\t \"\"\"Conditional probability of word, given previous word. Predicting end word\"\"\"\n\t #Inspiration from http://norvig.com/spell-correct.html\n\t bigram = (first_word , end_word)\n\t try: # might not have the keys for a bigram\n\t if self.prob_dist_bigram[bigram] > 0 and self.prob_dist_unigram[first_word] > 0:\n\n\t return self.prob_dist_bigram[bigram] / self.prob_dist_unigram[first_word]\n\t except:\n\t # Average the back-off value and zero. This is the smoothing\n\t return self.prob_dist_unigram[end_word] / 2\n\tdef cond_prob_trigram(self,start,middle,end):\n\t \"\"\"Find the conditional probability of a trigram model given the two previous words. Predicting the end word.\"\"\"\n\t #Inspiration from http://norvig.com/spell-correct.html\n\t trigram = (start ,middle , end)\n\t bigram = ( start,middle)\n\t try: ## might not have the keys for a trigram\n\t if self.prob_dist_trigram[trigram] > 0 and self.prob_dist_bigram[bigram] > 0 and self.prob_dist_unigram[start] >0:\n\t return self.prob_dist_trigram[trigram] / self.prob_dist_bigram[bigram] ##return prob of trigram over first two words\n\t except:\n\t try: #might not find a bigram\n\t if self.prob_dist_bigram[bigram] > 0 and self.prob_dist_unigram[start] >0: # Back off to bigram model\n\t return self.prob_dist_bigram[bigram] / self.prob_dist_unigram[start]\n\t except: # back off to unigram\n\t #back off to unigram model (three words). 
This is the smoothing\n\t return self.prob_dist_unigram[end] / 3\n\tdef sentence_generate(self,number_of_sentences):\n\t \"\"\"Generate random sentences based upon the probabilities given the the probability_dict\"\"\"\n\t number=0\n\t generated_text=[]\n\t character_counter = 0 \n\t #Find starting word of sentences using unigram proab\n\t sentence_endings = [\"?\",\"!\",\".\"]\n\t starting_chars = [item[1] for item in list(self.prob_dist_bigram.keys()) if item[0] in sentence_endings]\n\t starting_chars_counter = Counter(starting_chars)\n\t #find list of unigram probabilities for starting characters\n\t starting_prob = self.prob_distribution(starting_chars_counter)\n\t #Pick an initial starting character\n\t start_char_index = np.random.choice([i for i in range(len(starting_prob.keys()))],1,p=list(starting_prob.values()))\n\t generated_text.append(starting_chars[start_char_index])\n\t while number !=number_of_sentences: #make sure we have this number of sentences. Keep generating sentences if not.\n\t if len(generated_text)<3:\n\t words_list = list(self.prob_dist_bigram.keys())\n\t prev_character=generated_text[character_counter]\n\t current_word_options = [i[1] for i in self.text_counter_bigram if i[0]==prev_character] #Find bigrams with prev char\n\t prob_bigram_list = []\n\t for curr_word in current_word_options:\n\t prob_bigram_list.append(self.cond_prob_bigram(curr_word,prev_character))\n\t # weighted choice algorithm\n\t # http://stackoverflow.com/questions/22722079/choosing-elements-from-python-list-based-on-probability\n\t # 1) pick random number between 0 and 1\n\t # 2) walk through the list, subtracting each item from your number as your go\n\t # 3 ) when you go to 0 or below, pick the current item\n\t weight = np.random.random()\n\t bigram_word_index = 0\n\t for index,prob in enumerate(prob_bigram_list):\n\t weight -=prob\n\t if weight <0:\n\t bigram_word_index=index\n\t word = current_word_options[bigram_word_index]\n\t generated_text.append(word)\n\t character_counter+=1 ## go to the next character\n\t elif len(generated_text)>2: ###trigram \n\t words_list = list(self.prob_dist_trigram.keys()) ## list of all trigram\n\t first_character=generated_text[character_counter] # find the previous word (one index away)\n\t second_character=generated_text[character_counter-1] #find the previous word to the previous word\n\t current_triword_options= []\n\t prob_trigram_list = [] #list of conditional probabilities associated with the last word in the trigram\n\t for i in self.text_counter_trigram:\n\t if i[1]==first_character and i[0]==second_character: ##the first two words of our trigram\n\t curr_word = i[2] #the current word to predict\n\t prob_trigram_list.append(self.cond_prob_trigram(second_character,first_character,curr_word)) ##add prob\n\t current_triword_options.append(curr_word) ##add the possible word to our list\n\t if len(current_triword_options)==0: ## we do not have any options to continue the sentence.\n\t break\n\t weight = np.random.randint(1,sum(prob_trigram_list)*100000000)##need to change the weight because the triword options don't sum to 100%\n\t weight = weight/100000000# back to probabilities\n\t for index,prob in enumerate(prob_trigram_list):\n\t weight -=prob\n\t if weight <0: ## pick this as the word to add to the sentence\n\t word = current_triword_options[index]\n\t break\n\t generated_text.append(word)\n\t character_counter+=1 ## go to the next character \n\t if word in (\".\",\"!\",\"?\"): ##end of the sentence\n\t number+=1\n\t first_word = 
generated_text[0][0].upper()+generated_text[0][1:]\n\t sentence = first_word+' '\n\t for index,word in enumerate(generated_text[1:]):\n\t if generated_text[index] in (\".\",\"!\",\"?\"):\n\t sentence +=word[0].upper()+word[1:]+' '\n\t else:\n\t sentence +=word +' '\n\t return sentence\n\n"
] |
[
[
"numpy.random.random"
]
] |
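sentence_generator.py above picks the next word by drawing a random number and walking the list of conditional probabilities until the running total goes negative. That weighted-choice step in isolation, with hypothetical words and probabilities:

```python
import numpy as np

options = ['the', 'a', 'this']     # hypothetical candidate next words
probs = [0.5, 0.3, 0.2]            # their conditional probabilities

weight = np.random.random()        # 1) pick a number in [0, 1)
chosen = options[-1]
for word, p in zip(options, probs):
    weight -= p                    # 2) subtract each probability as you walk the list
    if weight < 0:                 # 3) stop at the item that drives the total below zero
        chosen = word
        break
print(chosen)
```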
ks-paul-lam/eltu2014
|
[
"86b32dda0924442dee3dab5acc687172c7aa8bc4"
] |
[
"deceptive_train.py"
] |
[
"def main():\n import os\n import fnmatch\n from textblob import TextBlob\n import pandas as pd\n from sklearn.feature_extraction.text import TfidfVectorizer\n from nltk.corpus import stopwords\n import regex as re\n from sklearn.metrics import classification_report, accuracy_score, confusion_matrix\n from sklearn.model_selection import train_test_split\n from sklearn import svm\n from sklearn.model_selection import GridSearchCV\n import pickle\n\n import nltk\n nltk.download('wordnet')\n nltk.download('stopwords')\n nltk.download('punkt')\n nltk.download('averaged_perceptron_tagger')\n\n def pos(review_without_stopwords):\n return TextBlob(review_without_stopwords).tags\n\n def svc_param_selection(X, y, nfolds):\n Cs = [0.001, 0.01, 0.1, 1, 10]\n gammas = [0.001, 0.01, 0.1, 1]\n param_grid = {'C': Cs, 'gamma': gammas}\n grid_search = GridSearchCV(svm.SVC(kernel='linear'), param_grid, cv=nfolds)\n grid_search.fit(X, y)\n return grid_search.best_params_\n\n path = 'deception\\\\'\n\n label = []\n\n configfiles = [os.path.join(subdir, f)\n for subdir, dirs, files in os.walk(path)\n for f in fnmatch.filter(files, '*.txt')]\n\n for f in configfiles:\n c = re.search('(trut|deceptiv)\\w', f)\n label.append(c.group())\n\n labels = pd.DataFrame(label, columns=['Labels'])\n\n review = []\n directory = os.path.join(path)\n for subdir, dirs, files in os.walk(directory):\n for file in files:\n if fnmatch.filter(files, '*.txt'):\n f = open(os.path.join(subdir, file), 'r')\n a = f.read()\n review.append(a)\n\n reviews = pd.DataFrame(review, columns=['HotelReviews'])\n\n result = pd.merge(reviews, labels, right_index=True, left_index=True)\n result['HotelReviews'] = result['HotelReviews'].map(lambda x: x.lower())\n\n stop = stopwords.words('english')\n result['review_without_stopwords'] = result['HotelReviews'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))\n\n os = result.review_without_stopwords.apply(pos)\n os1 = pd.DataFrame(os)\n\n os1['pos'] = os1['review_without_stopwords'].map(lambda x: \" \".join([\"/\".join(x) for x in x]))\n\n result = result = pd.merge(result, os1, right_index=True, left_index=True)\n\n review_train, review_test, label_train, label_test = train_test_split(result['pos'], result['Labels'], test_size=0.2, random_state=17)\n\n tf_vect = TfidfVectorizer(lowercase=True, use_idf=True, smooth_idf=True, sublinear_tf=False)\n X_train_tf = tf_vect.fit_transform(review_train)\n X_test_tf = tf_vect.transform(review_test)\n\n parameters = svc_param_selection(X_train_tf, label_train, 5)\n\n clf = svm.SVC(C=parameters['C'], gamma=parameters['gamma'], kernel='linear')\n clf.fit(X_train_tf, label_train)\n pred = clf.predict(X_test_tf)\n\n with open('deceptive_vectorizer.pickle', 'wb') as fin:\n pickle.dump(tf_vect, fin)\n\n with open('deceptive_mlmodel.pickle', 'wb') as f:\n pickle.dump(clf, f)\n\n X_test_tf = tf_vect.transform(review_test)\n pred = clf.predict(X_test_tf)\n\n print(accuracy_score(label_test, pred))\n print(confusion_matrix(label_test, pred))\n print(classification_report(label_test, pred))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.merge",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"sklearn.svm.SVC",
"sklearn.metrics.classification_report",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.metrics.accuracy_score"
]
] |
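As a quick, self-contained illustration of the pipeline in deceptive_train.py above (TF-IDF features fed to a linear SVM whose C is chosen by cross-validated grid search), here is a hedged sketch on a tiny made-up review set. The toy reviews and labels are placeholders, not data from the eltu2014 repository, and the POS-tagging and stop-word steps are omitted to keep it short.

```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report

# Tiny illustrative corpus (placeholder data, two balanced classes).
data = pd.DataFrame({
    "HotelReviews": [
        "the room was clean and the staff were friendly",
        "great location and a very comfortable bed",
        "breakfast was fresh and checkout was quick",
        "quiet floor spotless bathroom helpful concierge",
        "this so called luxury hotel is a complete scam",
        "nothing like the photos i would never return",
        "worst experience ever everything felt fake",
        "they clearly paid for their glowing reviews",
    ],
    "Labels": ["truthful"] * 4 + ["deceptive"] * 4,
})

X_train, X_test, y_train, y_test = train_test_split(
    data["HotelReviews"], data["Labels"],
    test_size=0.25, random_state=17, stratify=data["Labels"],
)

tf_vect = TfidfVectorizer(lowercase=True, use_idf=True, smooth_idf=True)
X_train_tf = tf_vect.fit_transform(X_train)
X_test_tf = tf_vect.transform(X_test)

# Mirror svc_param_selection(): pick C by cross-validated grid search.
# (The original also grids gamma, but gamma has no effect with a linear kernel.)
grid = GridSearchCV(SVC(kernel="linear"), {"C": [0.01, 0.1, 1, 10]}, cv=2)
grid.fit(X_train_tf, y_train)

clf = SVC(kernel="linear", C=grid.best_params_["C"])
clf.fit(X_train_tf, y_train)
pred = clf.predict(X_test_tf)
print(accuracy_score(y_test, pred))
print(classification_report(y_test, pred, zero_division=0))
```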
dieterich-lab/pybio-utils
|
[
"4cb5e987d19f68b48b03c6cc5f64d1177885b0fa"
] |
[
"pbio/utils/gffread_utils.py"
] |
[
"import collections\nimport logging\nimport re\n\nlogger = logging.getLogger(__name__)\n\n# build up the header regular expression\ntranscript_id_re = r'(?P<transcript_id>\\S+)'\ngene_re = r\"(?:gene=(?P<gene>\\S+))?\"\ncds_re = r\"(?:CDS=(?P<cds>\\S+))?\"\nloc_re = r\"loc:(?P<seqname>\\S+)\\|(?P<seq_pos>\\S+)\\|(?P<strand>[+-.])\"\nexons_re = r\"exons:(?P<exons>\\S*)\"\nsegs_re = r\"segs:(?P<segs>\\S*)\"\n\n# there is no need for the initial \">\" because pyfasta strips it\nheader_re = r\"{}\\s*{}\\s*{}\\s*{}\\s*{}\\s*{}\".format(transcript_id_re,\n gene_re,\n cds_re,\n loc_re,\n exons_re,\n segs_re)\n\nheader_re = re.compile(header_re)\n\ndefault_novel_id_re = \"\"\n\nfasta_header = collections.namedtuple(\n \"fasta_header\",\n \"transcript_id,gene,cds_start,cds_end,seqname,strand,\"\n \"exon_starts,exon_ends,seg_starts,seg_ends\"\n)\n\n\ndef get_starts_ends(coords):\n \"\"\" This function parses gffread-style coordinate strings into a pair of arrays.\n The first array contains all of the start positions, and the second contains\n all of the ends.\n\n Args:\n coords (string) : a string of coordinate pairs. For example:\n 1-56,57-261,262-543\n\n Returns:\n np.array : the first coordinate in each pair\n np.array : the second coordinate in each pair\n\n Imports:\n numpy\n \"\"\"\n import numpy as np\n\n s = re.split('-|,', coords)\n starts = np.array(s[0::2], dtype=int)\n ends = np.array(s[1::2], dtype=int)\n return starts, ends\n\ndef parse_header(header):\n \"\"\" This function parses out the exon (genomic) and segment (relative) coordinates\n from the gffread-style fasta header.\n\n Args:\n header (string) : a gffread-style header. For example:\n >ENST00000621500 gene=GPHB5 CDS=58-447 loc:14|63312835-63318879|- exons:63312835-63313116,63317646-63317850,63318824-63318879 segs:1-56,57-261,262-543\n\n exons_segs_re (compiled reg ex) : a compiled regular expression which\n parses the exons as the first \"group\" and the segments as the \n second \"group\"\n\n Returns:\n fasta_header namedtuple, with the following fields:\n transcript_id (str, \"ENST00000621500\")\n gene (str, \"GPHB5\")\n cds_start (int, 58)\n cds_end (int, 447)\n seqname (str, \"14\")\n strand (str, \"-\")\n exon_starts (np.array of ints, the first (absolute) coordinate in each exon)\n exon_ends (np.array of ints, the second (absolute) coordinate in each exon)\n seg_starts (np.array of ints, the first (relative) coordinate in each exon)\n seg_ends (np.array of ints, the second (relative) coordinate in each exon)\n\n Imports:\n numpy (indirectly)\n\n \"\"\"\n global header_re\n\n m = header_re.match(header)\n\n if m is None:\n msg = \"Failed parsing header. 
Header: '{}'\".format(header)\n raise ValueError(msg)\n\n transcript_id = m.group('transcript_id')\n gene = m.group('gene')\n strand = m.group('strand')\n seqname = m.group('seqname')\n cds = m.group('cds')\n\n exons = m.group('exons')\n segs = m.group('segs')\n \n cds_start = None\n cds_end = None\n if cds is not None:\n cds_start, cds_end = get_starts_ends(cds)\n cds_start = cds_start[0]\n cds_end = cds_end[0]\n \n exon_starts, exon_ends = get_starts_ends(exons)\n seg_starts, seg_ends = get_starts_ends(segs)\n \n h = fasta_header(\n transcript_id=transcript_id,\n gene=gene,\n cds_start=cds_start,\n cds_end=cds_end,\n seqname=seqname,\n strand=strand,\n exon_starts=exon_starts,\n exon_ends=exon_ends,\n seg_starts=seg_starts,\n seg_ends=seg_ends\n )\n \n return h\n\ndef get_all_headers(transcripts_fasta_file):\n \"\"\" This function extracts the header information from all records in a\n given gffread-like fasta file. It returns them as a data frame. This\n could, for example, be used to extract only the CDS regions.\n\n Also, see the documentation for parse_header for more information.\n\n Args:\n transcripts_fasta_file (string or file-like): The path to the fasta file, or\n an open file-like object.\n Returns:\n pd.DataFrame: a data frame with the following fields:\n transcript_id (str, \"ENST00000621500\")\n gene (str, \"GPHB5\")\n cds_start (int, 58)\n cds_end (int, 447)\n seqname (str, \"14\")\n strand (str, \"-\")\n exon_starts (np.array of ints, the first (absolute) coordinate in each exon)\n exon_ends (np.array of ints, the second (absolute) coordinate in each exon)\n seg_starts (np.array of ints, the first (relative) coordinate in each exon)\n seg_ends (np.array of ints, the second (relative) coordinate in each exon)\n\n Imports:\n pandas\n misc.bio\n misc.parallel\n \"\"\"\n import pbio.misc.bio as bio\n import pbio.misc.parallel as parallel\n import pandas as pd\n\n msg = \"Parsing all headers\"\n logger.info(msg)\n\n transcripts = bio.get_read_iterator(transcripts_fasta_file)\n \n all_headers = []\n for transcript in transcripts:\n h = parse_header(transcript[0])\n all_headers.append(h)\n\n all_headers = [header for header in all_headers if header is not None]\n all_headers = pd.DataFrame(all_headers)\n all_headers = all_headers.fillna(value=0)\n\n all_headers['cds_start'] = all_headers['cds_start'].astype(int)\n all_headers['cds_end'] = all_headers['cds_end'].astype(int)\n\n return all_headers\n\n\ndef get_utr_info(fasta_record):\n \"\"\" Given a pyfasta entry from a gffread fasta file, this function extracts\n the 5' and 3' UTR lengths and sequences.\n\n Args:\n fasta_entry (tuple): a biopython-like fasta record, which has (at least)\n two elements. 
The first should be a gffread-like fasta header, and\n the second should be a DNA sequence.\n\n The sequence is not validated, but the header is parsed using the\n parse_header function in this package.\n\n Returns:\n None, if fasta_entry header does not include CDS information\n\n OR\n\n dictionary containing:\n transcript_id (string): the transcript identifer from the \n header (which is everything until the first space)\n\n five_prime_utr_len, three_prime_utr_len (ints): the length of\n the respective UTRs\n\n five_prime_utr, three_prime_utr (strings): the respect UTR\n sequences\n\n Imports:\n numpy (indirectly)\n \"\"\"\n header = parse_header(fasta_record[0])\n if header.cds_start is None:\n return None\n \n five_prime_utr_len = header.cds_start - 1\n five_prime_utr = fasta_record[1][:five_prime_utr_len]\n \n # we subtract three because gffread includes the stop codon\n three_prime_utr_len = header.seg_ends[-1] - header.cds_end - 3\n if three_prime_utr_len < 0:\n three_prime_utr_len = 0\n three_prime_utr = \"\"\n else:\n three_prime_utr = fasta_record[1][-1*three_prime_utr_len:]\n \n \n utr_info = {\n 'transcript_id': header.transcript_id,\n 'five_prime_utr_len': five_prime_utr_len,\n 'three_prime_utr_len': three_prime_utr_len,\n 'five_prime_utr': five_prime_utr,\n 'three_prime_utr': three_prime_utr\n }\n \n return utr_info\n\ndef get_all_utrs(transcripts_fasta_file, progress_bar=False):\n \"\"\" This function extracts all of the annotated UTRs from the gffread-like\n transcripts file. In particular, UTRs are called as the parts of\n transcripts which occur before and after annotated CDSs. Transcripts\n with no annotated CDS region do not have UTRs according to this\n definition. The result is returned as a pandas data frame.\n\n Args:\n transcripts_fasta_file (string): the path to the gffread-like\n transcripts fasta file\n\n progress_bar (bool): whether to show a progress bar. Note that this\n requires iterating through the transcripts file twice to count\n the number of reads in the file.\n\n Returns:\n pd.DataFrame: a data frame with the following columns:\n \n transcript_id (string): the transcript identifer from the \n header (which is everything until the first space)\n\n five_prime_utr_len, three_prime_utr_len (ints): the length of\n the respective UTRs\n\n five_prime_utr, three_prime_utr (strings): the respect UTR\n sequences\n\n Imports:\n misc.parallel\n misc.bio\n Bio.SeqIO.FastaIO (from biopython)\n pandas\n\n gzip, if the fasta file is gzipped\n tqdm, if a progress bar is used\n \n \"\"\"\n import pbio.misc.parallel as parallel\n import pbio.utils.fastx_utils as fastx_utils\n import pandas as pd\n\n if progress_bar:\n msg = \"Counting sequences in file\"\n logger.info(msg)\n read_count = fastx_utils.get_read_count(transcripts_fasta_file)\n else:\n read_count = None\n\n msg = \"Extracting UTR sequences\"\n logger.info(msg)\n\n transcripts = fastx_utils.get_read_iterator(transcripts_fasta_file)\n all_utr_info = parallel.apply_iter_simple(transcripts, get_utr_info, \n progress_bar=progress_bar, total=read_count)\n\n all_utr_info = [utr_info for utr_info in all_utr_info if utr_info is not None]\n all_utr_info_df = pd.DataFrame(all_utr_info)\n\n return all_utr_info_df\n\n\n"
] |
[
[
"numpy.array",
"pandas.DataFrame"
]
] |
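The record above (pbio/utils/gffread_utils.py) centers on parsing gffread-style coordinate strings with a regex split and collecting the results via numpy.array and pandas.DataFrame, the two APIs listed. The following hedged sketch re-implements just that core step on illustrative header values (only the first transcript id comes from the docstring; the second is made up). It is a minimal demonstration, not a drop-in replacement for the module.

```python
import re
import numpy as np
import pandas as pd

def get_starts_ends(coords):
    """Split a gffread coordinate string like '1-56,57-261,262-543'
    into a (starts, ends) pair of integer arrays."""
    s = re.split("-|,", coords)
    starts = np.array(s[0::2], dtype=int)
    ends = np.array(s[1::2], dtype=int)
    return starts, ends

# Illustrative 'segs:' values; only the first id appears in the docstring above.
records = []
for transcript_id, segs in [
    ("ENST00000621500", "1-56,57-261,262-543"),
    ("ENST_TOY_000001", "1-120,121-300"),
]:
    seg_starts, seg_ends = get_starts_ends(segs)
    records.append({
        "transcript_id": transcript_id,
        "seg_starts": seg_starts,
        "seg_ends": seg_ends,
        "transcript_len": int(seg_ends[-1]),  # last relative coordinate
    })

print(pd.DataFrame(records))
```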
KhalidAlt/datasets
|
[
"f013aff823b831fb48b458a7bbb3bb11c2981326"
] |
[
"src/datasets/arrow_dataset.py"
] |
[
"# Copyright 2020 The HuggingFace Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\" Simple Dataset wrapping an Arrow Table.\"\"\"\n\nimport contextlib\nimport copy\nimport json\nimport os\nimport shutil\nimport tempfile\nimport weakref\nfrom collections import Counter, UserDict\nfrom collections.abc import Mapping\nfrom copy import deepcopy\nfrom dataclasses import asdict\nfrom functools import partial, wraps\nfrom io import BytesIO\nfrom math import ceil, floor\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n BinaryIO,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Tuple,\n Union,\n overload,\n)\n\nimport fsspec\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.compute as pc\nfrom huggingface_hub import HfApi, HfFolder\nfrom multiprocess import Pool, RLock\nfrom requests import HTTPError\nfrom tqdm.auto import tqdm\n\nfrom . import config, utils\nfrom .arrow_reader import ArrowReader\nfrom .arrow_writer import ArrowWriter, OptimizedTypedSequence\nfrom .features import (\n Audio,\n ClassLabel,\n Features,\n FeatureType,\n Image,\n Sequence,\n Value,\n _ArrayXD,\n decode_nested_example,\n pandas_types_mapper,\n require_decoding,\n)\nfrom .filesystems import extract_path_from_uri, is_remote_filesystem\nfrom .fingerprint import (\n fingerprint_transform,\n generate_fingerprint,\n generate_random_fingerprint,\n get_temporary_cache_files_directory,\n is_caching_enabled,\n maybe_register_dataset_for_temp_dir_deletion,\n update_fingerprint,\n)\nfrom .formatting import format_table, get_format_type_from_alias, get_formatter, query_table\nfrom .info import DatasetInfo\nfrom .search import IndexableMixin\nfrom .splits import NamedSplit, Split, SplitInfo\nfrom .table import (\n InMemoryTable,\n MemoryMappedTable,\n Table,\n cast_table_to_features,\n concat_tables,\n list_table_cache_files,\n table_cast,\n table_visitor,\n)\nfrom .tasks import TaskTemplate\nfrom .utils import logging\nfrom .utils.file_utils import estimate_dataset_size\nfrom .utils.info_utils import is_small_dataset\nfrom .utils.py_utils import temporary_assignment, unique_values\nfrom .utils.streaming_download_manager import xgetsize\nfrom .utils.typing import PathLike\n\n\nif TYPE_CHECKING:\n from .dataset_dict import DatasetDict\n\nlogger = logging.get_logger(__name__)\n\n\nclass LazyDict(UserDict):\n def __init__(self, data, features=None):\n self.data = data\n self.features = (\n {key: feature for key, feature in features.items() if features._column_requires_decoding[key]}\n if features\n else {}\n )\n\n\nclass Example(LazyDict):\n def __getitem__(self, key):\n value = super().__getitem__(key)\n if self.features and key in self.features:\n value = decode_nested_example(self.features[key], value) if value is not None else None\n self[key] = value\n del self.features[key]\n return value\n\n\nclass Batch(LazyDict):\n def __getitem__(self, key):\n values = super().__getitem__(key)\n if self.features and key in 
self.features:\n values = [\n decode_nested_example(self.features[key], value) if value is not None else None for value in values\n ]\n self[key] = values\n del self.features[key]\n return values\n\n\nclass DatasetInfoMixin:\n \"\"\"This base class exposes some attributes of DatasetInfo\n at the base level of the Dataset for easy access.\n \"\"\"\n\n def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):\n self._info = info\n self._split = split\n\n @property\n def info(self):\n \"\"\":class:`datasets.DatasetInfo` object containing all the metadata in the dataset.\"\"\"\n return self._info\n\n @property\n def split(self):\n \"\"\":class:`datasets.NamedSplit` object corresponding to a named dataset split.\"\"\"\n return self._split\n\n @property\n def builder_name(self) -> str:\n return self._info.builder_name\n\n @property\n def citation(self) -> str:\n return self._info.citation\n\n @property\n def config_name(self) -> str:\n return self._info.config_name\n\n @property\n def dataset_size(self) -> Optional[int]:\n return self._info.dataset_size\n\n @property\n def description(self) -> str:\n return self._info.description\n\n @property\n def download_checksums(self) -> Optional[dict]:\n return self._info.download_checksums\n\n @property\n def download_size(self) -> Optional[int]:\n return self._info.download_size\n\n @property\n def features(self) -> Features:\n return self._info.features\n\n @property\n def homepage(self) -> Optional[str]:\n return self._info.homepage\n\n @property\n def license(self) -> Optional[str]:\n return self._info.license\n\n @property\n def size_in_bytes(self) -> Optional[int]:\n return self._info.size_in_bytes\n\n @property\n def supervised_keys(self):\n return self._info.supervised_keys\n\n @property\n def task_templates(self):\n return self._info.task_templates\n\n @property\n def version(self):\n return self._info.version\n\n\nclass TensorflowDatasetMixin:\n _TF_DATASET_REFS = set()\n\n @staticmethod\n def _get_output_signature(dataset: \"Dataset\", collate_fn: Callable, collate_fn_args: dict, batch_size: int):\n \"\"\"Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset\n after being passed through the collate_fn.\n\n Args:\n dataset (:obj:`Dataset`): Dataset to load samples from.\n collate_fn(:obj:`bool`): Shuffle the dataset order when loading. Recommended True for training, False for\n validation/evaluation.\n collate_fn(:obj:`Callable`): A function or callable object (such as a `DataCollator`) that will collate\n lists of samples into a batch.\n collate_fn_args (:obj:`Dict`): A `dict` of keyword arguments to be passed to the\n `collate_fn`.\n batch_size (:obj:`int`): The size of batches loaded from the dataset. Used for shape inference.\n\n Returns:\n :obj:`dict`: Dict mapping column names to tf dtypes\n :obj:`dict`: Dict mapping column names to tf.TensorSpec objects\n \"\"\"\n if config.TF_AVAILABLE:\n import tensorflow as tf\n else:\n raise ImportError(\"Called a Tensorflow-specific function but Tensorflow is not installed.\")\n\n # Tensorflow needs an exact signature for tf.numpy_function, so\n # we need to figure out what's coming back in advance. 
The only way to do this is to run a test batch -\n # the collator may add columns, so we can't figure it out just by inspecting the dataset.\n if len(dataset) == 0:\n raise ValueError(\"Unable to get the output signature because the dataset is empty.\")\n test_batch_size = min(len(dataset), 4)\n test_batch = dataset[:test_batch_size]\n test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]\n test_batch = collate_fn(test_batch, **collate_fn_args)\n columns_to_dtypes = {}\n for key, array in test_batch.items():\n # In case the collate_fn returns something strange\n array = np.array(test_batch[key])\n if np.issubdtype(array.dtype, np.integer) or array.dtype == np.bool:\n cast_dtype = np.int64\n elif np.issubdtype(array.dtype, np.number):\n cast_dtype = np.float32\n else:\n continue # Probably a string, but whatever it is will cause Tensorflow to shit the bed, so drop it\n columns_to_dtypes[key] = cast_dtype\n\n signatures = {}\n for column, col_feature in dataset.features.items():\n if column not in columns_to_dtypes:\n continue\n shape = []\n shape_feature = col_feature\n while not isinstance(shape_feature, (Value, ClassLabel)):\n if isinstance(shape_feature, _ArrayXD):\n shape.extend(list(shape_feature.shape))\n break\n elif isinstance(shape_feature, Sequence):\n shape.insert(0, shape_feature.length)\n shape_feature = shape_feature.feature\n else:\n raise ValueError(\n f\"Couldn't parse feature {column} with type {type(col_feature)}! \"\n \"This may indicate a column was included with an unusual datatype \"\n \"that we were unable to process correctly. \"\n \"If you're getting this error with one of our datasets, and you're \"\n \"sure the column should be convertable to tf.Tensor, please \"\n \"file an issue at github.com/huggingface/datasets and tag \"\n \"@rocketknight1.\"\n )\n shape = [batch_size] + shape\n shape = [dim if dim != -1 else None for dim in shape]\n\n signatures[column] = tf.TensorSpec(shape=shape, dtype=tf.dtypes.as_dtype(columns_to_dtypes[column]))\n\n # Catching columns added by the collate_fn, such as MLM labels\n for column, tensor in test_batch.items():\n if column in signatures:\n continue\n if column.startswith(\"label\"):\n if \"input_ids\" in signatures and test_batch[column].shape == test_batch[\"input_ids\"].shape:\n shape = signatures[\"input_ids\"].shape\n else:\n # If this doesn't look like LM labels that got added by the collate_fn, let's not say anything\n # about the dimensions we're unsure of\n shape = [batch_size] + [None for dim in tensor.shape.as_list()[1:]]\n else:\n # If this doesn't look like LM labels that got added by the collate_fn, let's not say anything\n # about the dimensions we're unsure of\n shape = [batch_size] + [None for dim in tensor.shape.as_list()[1:]]\n signatures[column] = tf.TensorSpec(shape=shape, dtype=tensor.dtype)\n return columns_to_dtypes, signatures\n\n def to_tf_dataset(\n self,\n columns: Union[str, List[str]],\n batch_size: int,\n shuffle: bool,\n collate_fn: Callable,\n drop_remainder: bool = None,\n collate_fn_args: Dict[str, Any] = None,\n label_cols: Union[str, List[str]] = None,\n dummy_labels: bool = False,\n prefetch: bool = True,\n ):\n \"\"\"Create a tf.data.Dataset from the underlying Dataset. This tf.data.Dataset will load and collate batches from\n the Dataset, and is suitable for passing to methods like model.fit() or model.predict().\n\n Args:\n columns (:obj:`List[str]` or :obj:`str`): Dataset column(s) to load in the tf.data.Dataset. 
In general,\n only columns that the model can use as input should be included here (numeric data only).\n batch_size (:obj:`int`): Size of batches to load from the dataset.\n shuffle(:obj:`bool`): Shuffle the dataset order when loading. Recommended True for training, False for\n validation/evaluation.\n drop_remainder(:obj:`bool`, default ``None``): Drop the last incomplete batch when loading. If not provided,\n defaults to the same setting as shuffle.\n collate_fn(:obj:`Callable`): A function or callable object (such as a `DataCollator`) that will collate\n lists of samples into a batch.\n collate_fn_args (:obj:`Dict`, optional): An optional `dict` of keyword arguments to be passed to the\n `collate_fn`.\n label_cols (:obj:`List[str]` or :obj:`str`, default ``None``): Dataset column(s) to load as\n labels. Note that many models compute loss internally rather than letting Keras do it, in which case it is\n not necessary to actually pass the labels here, as long as they're in the input `columns`.\n dummy_labels (:obj:`bool`, default ``False``): If no `label_cols` are set, output an array of \"dummy\" labels\n with each batch. This can avoid problems with `fit()` or `train_on_batch()` that expect labels to be\n a Tensor or np.ndarray, but should (hopefully) not be necessary with our standard train_step().\n prefetch (:obj:`bool`, default ``True``): Whether to run the dataloader in a separate thread and maintain\n a small buffer of batches for training. Improves performance by allowing data to be loaded in the\n background while the model is training.\n\n Returns:\n :class:`tf.data.Dataset`\n \"\"\"\n\n if config.TF_AVAILABLE:\n import tensorflow as tf\n else:\n raise ImportError(\"Called a Tensorflow-specific function but Tensorflow is not installed.\")\n\n if collate_fn_args is None:\n collate_fn_args = {}\n\n if label_cols is None:\n label_cols = []\n elif isinstance(label_cols, str):\n label_cols = [label_cols]\n elif len(set(label_cols)) < len(label_cols):\n raise ValueError(\"List of label_cols contains duplicates.\")\n if not columns:\n raise ValueError(\"Need to specify at least one column.\")\n elif isinstance(columns, str):\n columns = [columns]\n elif len(set(columns)) < len(columns):\n raise ValueError(\"List of columns contains duplicates.\")\n if label_cols is not None:\n cols_to_retain = columns + label_cols\n else:\n cols_to_retain = columns\n if \"label\" in cols_to_retain or \"labels\" in cols_to_retain or \"label_ids\" in cols_to_retain:\n cols_to_retain += [\"labels\", \"label\", \"label_ids\"] # Don't accidentally drop any labels with other names!\n cols_to_retain = list(set(cols_to_retain)) # Remove any duplicates\n\n if drop_remainder is None:\n # We assume that if you're shuffling it's the train set, so we drop the remainder unless told not to\n drop_remainder = shuffle\n\n retained_columns = [key for key in self.features.keys() if key in cols_to_retain]\n dataset = self.with_format(\"numpy\", columns=retained_columns)\n\n columns_to_dtypes, output_signature = self._get_output_signature(\n dataset, collate_fn, collate_fn_args, batch_size=batch_size if drop_remainder else None\n )\n all_columns = list(columns_to_dtypes.keys())\n all_dtypes = list(columns_to_dtypes.values())\n\n def np_get_batch(indices):\n batch = dataset[indices]\n actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same\n # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert\n batch = [{key: value[i] for key, value in batch.items()} 
for i in range(actual_size)]\n batch = collate_fn(batch, **collate_fn_args)\n out_batch = []\n for col, cast_dtype in columns_to_dtypes.items():\n # In case the collate_fn returns something strange\n array = np.array(batch[col])\n array = array.astype(cast_dtype)\n out_batch.append(array)\n return out_batch\n\n @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)])\n def fetch_function(indices):\n output = tf.numpy_function(\n # This works because dictionaries always output in insertion order\n np_get_batch,\n inp=[indices],\n Tout=[tf.dtypes.as_dtype(dtype) for dtype in all_dtypes],\n )\n return {key: output[i] for i, key in enumerate(all_columns)}\n\n tf_dataset = tf.data.Dataset.from_tensor_slices(np.arange(len(dataset), dtype=np.int64))\n\n if shuffle:\n tf_dataset = tf_dataset.shuffle(len(dataset))\n\n def ensure_shapes(input_dict):\n return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()}\n\n tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder).map(fetch_function).map(ensure_shapes)\n\n if label_cols:\n\n def split_features_and_labels(input_batch):\n if \"labels\" in columns or \"label_ids\" in columns or \"label\" in columns:\n columns.append(\"labels\")\n if \"labels\" in label_cols or \"label_ids\" in label_cols or \"label\" in label_cols:\n label_cols.append(\"labels\")\n # Some data collators add columns, so our logic is that newly added columns should go\n # into the input dict unless the user asked for them in labels instead\n features = {\n key: tensor for key, tensor in input_batch.items() if key in columns or key not in label_cols\n }\n labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}\n if len(features) == 1:\n features = list(features.values())[0]\n if len(labels) == 1:\n labels = list(labels.values())[0]\n return features, labels\n\n tf_dataset = tf_dataset.map(split_features_and_labels)\n\n elif len(columns) == 1:\n tf_dataset = tf_dataset.map(lambda x: list(x.values())[0])\n\n if dummy_labels and not label_cols:\n\n def add_dummy_labels(input_batch):\n return input_batch, tf.zeros(tf.shape(input_batch[columns[0]])[0])\n\n tf_dataset = tf_dataset.map(add_dummy_labels)\n\n if prefetch:\n tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n # Remove a reference to the open Arrow file on delete\n def cleanup_callback(ref):\n dataset.__del__()\n self._TF_DATASET_REFS.remove(ref)\n\n self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))\n return tf_dataset\n\n\nclass DatasetTransformationNotAllowedError(Exception):\n pass\n\n\ndef transmit_format(func):\n \"\"\"Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset\"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if args:\n self: \"Dataset\" = args[0]\n args = args[1:]\n else:\n self: \"Dataset\" = kwargs.pop(\"self\")\n # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None\n unformatted_columns = set(self.column_names) - set(self._format_columns or [])\n self_format = {\n \"type\": self._format_type,\n \"format_kwargs\": self._format_kwargs,\n \"columns\": self._format_columns,\n \"output_all_columns\": self._output_all_columns,\n }\n # apply actual function\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\n datasets: List[\"Dataset\"] = list(out.values()) if isinstance(out, dict) else [out]\n # re-apply format to the output\n for dataset 
in datasets:\n new_format = self_format.copy()\n if new_format[\"columns\"] is not None: # new formatted columns = (columns - previously unformatted columns)\n # sort the columns to have a deterministic list of columns that we can compare with `out_format`\n new_format[\"columns\"] = sorted(set(dataset.column_names) - unformatted_columns)\n out_format = {\n \"type\": dataset._format_type,\n \"format_kwargs\": dataset._format_kwargs,\n \"columns\": sorted(dataset._format_columns) if dataset._format_columns is not None else None,\n \"output_all_columns\": dataset._output_all_columns,\n }\n if out_format != new_format: # only apply if there's a change not to update the fingerprint for nothing\n dataset.set_format(**new_format)\n return out\n\n wrapper._decorator_name_ = \"transmit_format\"\n return wrapper\n\n\ndef transmit_tasks(func):\n \"\"\"Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset\"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if args:\n self: \"Dataset\" = args[0]\n args = args[1:]\n else:\n self: \"Dataset\" = kwargs.pop(\"self\")\n # apply actual function\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\n datasets: List[\"Dataset\"] = list(out.values()) if isinstance(out, dict) else [out]\n for dataset in datasets:\n # Remove task templates if a column mapping of the template is no longer valid\n if self.info.task_templates is not None:\n dataset.info.task_templates = [\n template\n for template in self.info.task_templates\n if all(dataset.features.get(k) == self.features.get(k) for k in template.column_mapping.keys())\n ]\n return out\n\n wrapper._decorator_name_ = \"transmit_tasks\"\n return wrapper\n\n\ndef update_metadata_with_features(table: Table, features: Features):\n \"\"\"To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema.\"\"\"\n features = Features({col_name: features[col_name] for col_name in table.column_names})\n if table.schema.metadata is None or b\"huggingface\" not in table.schema.metadata:\n pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features))\n else:\n metadata = json.loads(table.schema.metadata[b\"huggingface\"].decode())\n if \"info\" not in metadata:\n metadata[\"info\"] = asdict(DatasetInfo(features=features))\n else:\n metadata[\"info\"][\"features\"] = asdict(DatasetInfo(features=features))[\"features\"]\n pa_metadata = {\"huggingface\": json.dumps(metadata)}\n table = table.replace_schema_metadata(pa_metadata)\n return table\n\n\ndef _check_table(table) -> Table:\n \"\"\"We check the table type to make sure it's an instance of :class:`datasets.table.Table`\"\"\"\n if isinstance(table, pa.Table):\n # for a pyarrow table, we can just consider it as a in-memory table\n # this is here for backward compatibility\n return InMemoryTable(table)\n elif isinstance(table, Table):\n return table\n else:\n raise TypeError(f\"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.\")\n\n\ndef _check_column_names(column_names: List[str]):\n \"\"\"Check the column names to make sure they don't contain duplicates.\"\"\"\n counter = Counter(column_names)\n if not all(count == 1 for count in counter.values()):\n duplicated_columns = [col for col in counter if counter[col] > 1]\n raise ValueError(f\"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.\")\n\n\ndef 
_check_valid_indices_value(value, size):\n if (value < 0 and value + size < 0) or (value >= size):\n raise IndexError(\n f\"Invalid value {value} in indices iterable. All values must be within range [-{size}, {size - 1}].\"\n )\n\n\ndef _check_if_features_can_be_aligned(features_list: List[Features]):\n \"\"\"Check if the dictionaries of features can be aligned.\n\n Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value(\"null\")`.\n \"\"\"\n name2feature = {}\n for features in features_list:\n for k, v in features.items():\n if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == \"null\"):\n name2feature[k] = v\n\n for features in features_list:\n for k, v in features.items():\n if not (isinstance(v, Value) and v.dtype == \"null\") and name2feature[k] != v:\n raise ValueError(\n f'The features can\\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value(\"null\").'\n )\n\n\ndef _align_features(features_list: List[Features]) -> List[Features]:\n \"\"\"Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature.\"\"\"\n name2feature = {}\n for features in features_list:\n for k, v in features.items():\n if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == \"null\"):\n name2feature[k] = v\n\n return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]\n\n\nclass NonExistentDatasetError(Exception):\n \"\"\"Used when we expect the existence of a dataset\"\"\"\n\n pass\n\n\nclass Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):\n \"\"\"A Dataset backed by an Arrow table.\"\"\"\n\n def __init__(\n self,\n arrow_table: Table,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n indices_table: Optional[Table] = None,\n fingerprint: Optional[str] = None,\n ):\n info = info.copy() if info is not None else DatasetInfo()\n DatasetInfoMixin.__init__(self, info=info, split=split)\n IndexableMixin.__init__(self)\n\n self._data: Table = _check_table(arrow_table)\n self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None\n maybe_register_dataset_for_temp_dir_deletion(self)\n\n self._format_type: Optional[str] = None\n self._format_kwargs: dict = {}\n self._format_columns: Optional[list] = None\n self._output_all_columns: bool = False\n self._fingerprint: str = fingerprint\n\n # Read metadata\n\n if self._data.schema.metadata is not None and b\"huggingface\" in self._data.schema.metadata:\n metadata = json.loads(self._data.schema.metadata[b\"huggingface\"].decode())\n if (\n \"fingerprint\" in metadata and self._fingerprint is None\n ): # try to load fingerprint from the arrow file metadata\n self._fingerprint = metadata[\"fingerprint\"]\n\n # Infer features if None\n inferred_features = Features.from_arrow_schema(arrow_table.schema)\n if self.info.features is None:\n self.info.features = inferred_features\n else: # make sure the nested columns are in the right order\n self.info.features = self.info.features.reorder_fields_as(inferred_features)\n\n # Infer fingerprint if None\n\n if self._fingerprint is None:\n self._fingerprint = generate_fingerprint(self)\n\n # Sanity checks\n\n if self.features is None:\n raise ValueError(\"Features can't be None in a Dataset object\")\n if self._fingerprint is None:\n raise 
ValueError(\"Fingerprint can't be None in a Dataset object\")\n if self.info.features.type != inferred_features.type:\n raise ValueError(\n f\"External features info don't match the dataset:\\nGot\\n{self.info.features}\\nwith type\\n{self.info.features.type}\\n\\nbut expected something like\\n{inferred_features}\\nwith type\\n{inferred_features.type}\"\n )\n\n if self._indices is not None:\n if not pa.types.is_unsigned_integer(self._indices.column(0)[0].type):\n raise ValueError(\n f\"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0)[0].type}\"\n )\n _check_column_names(self._data.column_names)\n\n self._data = update_metadata_with_features(self._data, self.features)\n\n @classmethod\n def from_file(\n cls,\n filename: str,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n indices_filename: Optional[str] = None,\n in_memory: bool = False,\n ) -> \"Dataset\":\n \"\"\"Instantiate a Dataset backed by an Arrow table at filename.\n\n Args:\n filename (:obj:`str`): File name of the dataset.\n info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.\n split (:class:`NamedSplit`, optional): Name of the dataset split.\n indices_filename (:obj:`str`, optional): File names of the indices.\n in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n table = ArrowReader.read_table(filename, in_memory=in_memory)\n\n if indices_filename is not None:\n indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)\n else:\n indices_pa_table = None\n\n return cls(\n arrow_table=table,\n info=info,\n split=split,\n indices_table=indices_pa_table,\n )\n\n @classmethod\n def from_buffer(\n cls,\n buffer: pa.Buffer,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n indices_buffer: Optional[pa.Buffer] = None,\n ) -> \"Dataset\":\n \"\"\"Instantiate a Dataset backed by an Arrow buffer.\n\n Args:\n buffer (:obj:`pyarrow.Buffer`): Arrow buffer.\n info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.\n split (:class:`NamedSplit`, optional): Name of the dataset split.\n indices_buffer (:obj:`pyarrow.Buffer`, optional): Indices Arrow buffer.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n table = InMemoryTable.from_buffer(buffer)\n\n if indices_buffer is not None:\n indices_table = InMemoryTable.from_buffer(buffer)\n else:\n indices_table = None\n\n return cls(table, info=info, split=split, indices_table=indices_table)\n\n @classmethod\n def from_pandas(\n cls,\n df: pd.DataFrame,\n features: Optional[Features] = None,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n preserve_index: Optional[bool] = None,\n ) -> \"Dataset\":\n \"\"\"\n Convert :obj:`pandas.DataFrame` to a :obj:`pyarrow.Table` to create a :class:`Dataset`.\n\n The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the\n DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the\n case of `object`, we need to guess the datatype by looking at the Python objects in this Series.\n\n Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow\n type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only\n contains None/nan objects, the type is set to null. 
This behavior can be avoided by constructing explicit\n features and passing it to this function.\n\n Args:\n df (:obj:`pandas.DataFrame`): Dataframe that contains the dataset.\n features (:class:`Features`, optional): Dataset features.\n info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.\n split (:class:`NamedSplit`, optional): Name of the dataset split.\n preserve_index (:obj:`bool`, optional): Whether to store the index as an additional column in the resulting Dataset.\n The default of None will store the index as a column, except for RangeIndex which is stored as metadata only.\n Use preserve_index=True to force it to be stored as a column.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n if info is not None and features is not None and info.features != features:\n raise ValueError(\n f\"Features specified in `features` and `info.features` can't be different:\\n{features}\\n{info.features}\"\n )\n features = features if features is not None else info.features if info is not None else None\n if info is None:\n info = DatasetInfo()\n info.features = features\n table = InMemoryTable.from_pandas(\n df=df, preserve_index=preserve_index, schema=features.arrow_schema if features is not None else None\n )\n return cls(table, info=info, split=split)\n\n @classmethod\n def from_dict(\n cls,\n mapping: dict,\n features: Optional[Features] = None,\n info: Optional[Any] = None,\n split: Optional[Any] = None,\n ) -> \"Dataset\":\n \"\"\"\n Convert :obj:`dict` to a :obj:`pyarrow.Table` to create a :class:`Dataset`.\n\n Args:\n mapping (:obj:`Mapping`): Mapping of strings to Arrays or Python lists.\n features (:class:`Features`, optional): Dataset features.\n info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.\n split (:class:`NamedSplit`, optional): Name of the dataset split.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n if info is not None and features is not None and info.features != features:\n raise ValueError(\n f\"Features specified in `features` and `info.features` can't be different:\\n{features}\\n{info.features}\"\n )\n features = features if features is not None else info.features if info is not None else None\n if info is None:\n info = DatasetInfo()\n info.features = features\n if features is not None:\n mapping = features.encode_batch(mapping)\n mapping = {\n col: OptimizedTypedSequence(data, type=features[col] if features is not None else None, col=col)\n for col, data in mapping.items()\n }\n pa_table = InMemoryTable.from_pydict(mapping=mapping)\n if info.features is None:\n info.features = Features({col: ts.get_inferred_type() for col, ts in mapping.items()})\n return cls(pa_table, info=info, split=split)\n\n @staticmethod\n def from_csv(\n path_or_paths: Union[PathLike, List[PathLike]],\n split: Optional[NamedSplit] = None,\n features: Optional[Features] = None,\n cache_dir: str = None,\n keep_in_memory: bool = False,\n **kwargs,\n ):\n \"\"\"Create Dataset from CSV file(s).\n\n Args:\n path_or_paths (path-like or list of path-like): Path(s) of the CSV file(s).\n split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset.\n features (:class:`Features`, optional): Dataset features.\n cache_dir (:obj:`str`, optional, default ``\"~/.cache/huggingface/datasets\"``): Directory to cache data.\n keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory.\n **kwargs: Keyword arguments to be passed to :meth:`pandas.read_csv`.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n # 
Dynamic import to avoid circular dependency\n from .io.csv import CsvDatasetReader\n\n return CsvDatasetReader(\n path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs\n ).read()\n\n @staticmethod\n def from_json(\n path_or_paths: Union[PathLike, List[PathLike]],\n split: Optional[NamedSplit] = None,\n features: Optional[Features] = None,\n cache_dir: str = None,\n keep_in_memory: bool = False,\n field: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Create Dataset from JSON or JSON Lines file(s).\n\n Args:\n path_or_paths (path-like or list of path-like): Path(s) of the JSON or JSON Lines file(s).\n split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset.\n features (:class:`Features`, optional): Dataset features.\n cache_dir (:obj:`str`, optional, default ``\"~/.cache/huggingface/datasets\"``): Directory to cache data.\n keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory.\n field (:obj:`str`, optional): Field name of the JSON file where the dataset is contained in.\n **kwargs: Keyword arguments to be passed to :class:`JsonConfig`.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n # Dynamic import to avoid circular dependency\n from .io.json import JsonDatasetReader\n\n return JsonDatasetReader(\n path_or_paths,\n split=split,\n features=features,\n cache_dir=cache_dir,\n keep_in_memory=keep_in_memory,\n field=field,\n **kwargs,\n ).read()\n\n @staticmethod\n def from_parquet(\n path_or_paths: Union[PathLike, List[PathLike]],\n split: Optional[NamedSplit] = None,\n features: Optional[Features] = None,\n cache_dir: str = None,\n keep_in_memory: bool = False,\n columns: Optional[List[str]] = None,\n **kwargs,\n ):\n \"\"\"Create Dataset from Parquet file(s).\n\n Args:\n path_or_paths (path-like or list of path-like): Path(s) of the Parquet file(s).\n split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset.\n features (:class:`Features`, optional): Dataset features.\n cache_dir (:obj:`str`, optional, default ``\"~/.cache/huggingface/datasets\"``): Directory to cache data.\n keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory.\n columns (:obj:`List[str]`, optional): If not None, only these columns will be read from the file.\n A column name may be a prefix of a nested field, e.g. 
'a' will select\n 'a.b', 'a.c', and 'a.d.e'.\n **kwargs: Keyword arguments to be passed to :class:`ParquetConfig`.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n # Dynamic import to avoid circular dependency\n from .io.parquet import ParquetDatasetReader\n\n return ParquetDatasetReader(\n path_or_paths,\n split=split,\n features=features,\n cache_dir=cache_dir,\n keep_in_memory=keep_in_memory,\n columns=columns,\n **kwargs,\n ).read()\n\n @staticmethod\n def from_text(\n path_or_paths: Union[PathLike, List[PathLike]],\n split: Optional[NamedSplit] = None,\n features: Optional[Features] = None,\n cache_dir: str = None,\n keep_in_memory: bool = False,\n **kwargs,\n ):\n \"\"\"Create Dataset from text file(s).\n\n Args:\n path_or_paths (path-like or list of path-like): Path(s) of the text file(s).\n split (:class:`NamedSplit`, optional): Split name to be assigned to the dataset.\n features (:class:`Features`, optional): Dataset features.\n cache_dir (:obj:`str`, optional, default ``\"~/.cache/huggingface/datasets\"``): Directory to cache data.\n keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory.\n **kwargs: Keyword arguments to be passed to :class:`TextConfig`.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n # Dynamic import to avoid circular dependency\n from .io.text import TextDatasetReader\n\n return TextDatasetReader(\n path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs\n ).read()\n\n def __del__(self):\n if hasattr(self, \"_data\"):\n del self._data\n if hasattr(self, \"_indices\"):\n del self._indices\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables\n self.__del__()\n\n def save_to_disk(self, dataset_path: str, fs=None):\n \"\"\"\n Saves a dataset to a dataset directory, or in a filesystem using either :class:`~filesystems.S3FileSystem` or\n any implementation of ``fsspec.spec.AbstractFileSystem``.\n\n Args:\n dataset_path (:obj:`str`): Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)\n of the dataset directory where the dataset will be saved to.\n fs (:class:`~filesystems.S3FileSystem`, ``fsspec.spec.AbstractFileSystem``, optional, defaults ``None``):\n Instance of the remote filesystem used to download the files from.\n \"\"\"\n if self.list_indexes():\n raise ValueError(\"please remove all the indexes using `dataset.drop_index` before saving a dataset\")\n\n dataset = self.flatten_indices() if self._indices is not None else self\n\n if is_remote_filesystem(fs):\n dataset_path = extract_path_from_uri(dataset_path)\n else:\n fs = fsspec.filesystem(\"file\")\n cache_files_paths = [Path(cache_filename[\"filename\"]) for cache_filename in self.cache_files]\n # Check that the dataset doesn't overwrite iself. 
It can cause a permission error on Windows and a segfault on linux.\n if Path(dataset_path, config.DATASET_ARROW_FILENAME) in cache_files_paths:\n raise PermissionError(\n f\"Tried to overwrite {Path(dataset_path, config.DATASET_ARROW_FILENAME)} but a dataset can't overwrite itself.\"\n )\n if Path(dataset_path, config.DATASET_INDICES_FILENAME) in cache_files_paths:\n raise PermissionError(\n f\"Tried to overwrite {Path(dataset_path, config.DATASET_INDICES_FILENAME)} but a dataset can't overwrite itself.\"\n )\n\n # Get json serializable state\n state = {\n key: dataset.__dict__[key]\n for key in [\n \"_fingerprint\",\n \"_format_columns\",\n \"_format_kwargs\",\n \"_format_type\",\n \"_indexes\",\n \"_output_all_columns\",\n ]\n }\n\n split = dataset.__dict__[\"_split\"]\n state[\"_split\"] = str(split) if split is not None else split\n\n state[\"_data_files\"] = [{\"filename\": config.DATASET_ARROW_FILENAME}]\n for k in state[\"_format_kwargs\"].keys():\n try:\n json.dumps(state[\"_format_kwargs\"][k])\n except TypeError as e:\n raise TypeError(\n str(e) + f\"\\nThe format kwargs must be JSON serializable, but key '{k}' isn't.\"\n ) from None\n\n # Get json serializable dataset info\n dataset_info = asdict(dataset._info)\n\n # Save dataset + indices + state + info\n fs.makedirs(dataset_path, exist_ok=True)\n with fs.open(Path(dataset_path, config.DATASET_ARROW_FILENAME).as_posix(), \"wb\") as dataset_file:\n with ArrowWriter(stream=dataset_file) as writer:\n writer.write_table(dataset._data)\n writer.finalize()\n with fs.open(\n Path(dataset_path, config.DATASET_STATE_JSON_FILENAME).as_posix(), \"w\", encoding=\"utf-8\"\n ) as state_file:\n json.dump(state, state_file, indent=2, sort_keys=True)\n with fs.open(\n Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix(), \"w\", encoding=\"utf-8\"\n ) as dataset_info_file:\n # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True\n sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}\n json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)\n logger.info(f\"Dataset saved in {dataset_path}\")\n\n @staticmethod\n def _build_local_temp_path(uri_or_path: str) -> Path:\n \"\"\"\n Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative\n path extracted from the uri) passed.\n\n Args:\n uri_or_path (:obj:`str`): Path (e.g. `\"dataset/train\"`) or remote URI (e.g.\n `\"s3://my-bucket/dataset/train\"`) to concatenate.\n\n Returns:\n :class:`Path`: the concatenated path (temp dir + path)\n \"\"\"\n src_dataset_path = Path(uri_or_path)\n tmp_dir = get_temporary_cache_files_directory()\n return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))\n\n @staticmethod\n def load_from_disk(dataset_path: str, fs=None, keep_in_memory: Optional[bool] = None) -> \"Dataset\":\n \"\"\"\n Loads a dataset that was previously saved using :meth:`save_to_disk` from a dataset directory, or from a\n filesystem using either :class:`~filesystems.S3FileSystem` or any implementation of\n ``fsspec.spec.AbstractFileSystem``.\n\n Args:\n dataset_path (:obj:`str`): Path (e.g. 
`\"dataset/train\"`) or remote URI (e.g.\n `\"s3//my-bucket/dataset/train\"`) of the dataset directory where the dataset will be loaded from.\n fs (:class:`~filesystems.S3FileSystem`, ``fsspec.spec.AbstractFileSystem``, optional, default ``None``):\n Instance of the remote filesystem used to download the files from.\n keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the\n dataset will not be copied in-memory unless explicitly enabled by setting\n `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the\n :ref:`load_dataset_enhancing_performance` section.\n\n Returns:\n :class:`Dataset` or :class:`DatasetDict`:\n - If `dataset_path` is a path of a dataset directory: the dataset requested.\n - If `dataset_path` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split.\n \"\"\"\n # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies\n fs = fsspec.filesystem(\"file\") if fs is None else fs\n dataset_dict_json_path = Path(dataset_path, config.DATASETDICT_JSON_FILENAME).as_posix()\n dataset_info_path = Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix()\n if not fs.isfile(dataset_info_path) and fs.isfile(dataset_dict_json_path):\n raise FileNotFoundError(\n f\"No such file or directory: '{dataset_info_path}'. Expected to load a Dataset object, but got a DatasetDict. Please use datasets.load_from_disk instead.\"\n )\n\n if is_remote_filesystem(fs):\n src_dataset_path = extract_path_from_uri(dataset_path)\n dataset_path = Dataset._build_local_temp_path(src_dataset_path)\n fs.download(src_dataset_path, dataset_path.as_posix(), recursive=True)\n\n with open(Path(dataset_path, config.DATASET_STATE_JSON_FILENAME).as_posix(), encoding=\"utf-8\") as state_file:\n state = json.load(state_file)\n with open(Path(dataset_path, config.DATASET_INFO_FILENAME).as_posix(), encoding=\"utf-8\") as dataset_info_file:\n dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))\n\n dataset_size = estimate_dataset_size(\n Path(dataset_path, data_file[\"filename\"]) for data_file in state[\"_data_files\"]\n )\n keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)\n table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable\n arrow_table = concat_tables(\n table_cls.from_file(Path(dataset_path, data_file[\"filename\"]).as_posix())\n for data_file in state[\"_data_files\"]\n )\n\n split = state[\"_split\"]\n split = Split(split) if split is not None else split\n\n return Dataset(\n arrow_table=arrow_table,\n info=dataset_info,\n split=split,\n fingerprint=state[\"_fingerprint\"],\n )\n\n @property\n def data(self) -> Table:\n \"\"\"The Apache Arrow table backing the dataset.\"\"\"\n return self._data\n\n @property\n def cache_files(self) -> List[dict]:\n \"\"\"The cache files containing the Apache Arrow table backing the dataset.\"\"\"\n cache_files = list_table_cache_files(self._data)\n if self._indices is not None:\n cache_files += list_table_cache_files(self._indices)\n return [{\"filename\": cache_filename} for cache_filename in cache_files]\n\n @property\n def num_columns(self) -> int:\n \"\"\"Number of columns in the dataset.\"\"\"\n return self._data.num_columns\n\n @property\n def num_rows(self) -> int:\n \"\"\"Number of rows in the dataset (same as :meth:`Dataset.__len__`).\"\"\"\n if self._indices is not None:\n return self._indices.num_rows\n return 
self._data.num_rows\n\n @property\n def column_names(self) -> List[str]:\n \"\"\"Names of the columns in the dataset.\"\"\"\n return self._data.column_names\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"Shape of the dataset (number of columns, number of rows).\"\"\"\n if self._indices is not None:\n return (self._indices.num_rows, self._data.num_columns)\n return self._data.shape\n\n def unique(self, column: str) -> List[Any]:\n \"\"\"Return a list of the unique elements in a column.\n\n This is implemented in the low-level backend and as such, very fast.\n\n Args:\n column (:obj:`str`): Column name (list all the column names with :func:`datasets.Dataset.column_names`).\n\n Returns:\n :obj:`list`: List of unique elements in the given column.\n \"\"\"\n if column not in self._data.column_names:\n raise ValueError(f\"Column ({column}) not in table columns ({self._data.column_names}).\")\n\n if self._indices is not None and self._indices.num_rows != self._data.num_rows:\n dataset = self.flatten_indices()\n else:\n dataset = self\n\n return dataset._data.column(column).unique().to_pylist()\n\n def class_encode_column(self, column: str, include_nulls: bool = False) -> \"Dataset\":\n \"\"\"Casts the given column as :obj:``datasets.features.ClassLabel`` and updates the table.\n\n Args:\n column (`str`): The name of the column to cast (list all the column names with :func:`datasets.Dataset.column_names`)\n include_nulls (`bool`, default `False`):\n Whether to include null values in the class labels. If True, the null values will be encoded as the `\"None\"` class label.\n\n .. versionadded:: 1.14.2\n \"\"\"\n # Sanity checks\n if column not in self._data.column_names:\n raise ValueError(f\"Column ({column}) not in table columns ({self._data.column_names}).\")\n src_feat = self.features[column]\n if not isinstance(src_feat, Value):\n raise ValueError(\n f\"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}.\"\n )\n\n if src_feat.dtype != \"string\" or (include_nulls and None in self.unique(column)):\n\n def stringify_column(batch):\n batch[column] = [\n str(sample) if include_nulls or sample is not None else None for sample in batch[column]\n ]\n return batch\n\n dset = self.map(\n stringify_column,\n batched=True,\n desc=\"Stringifying the column\",\n )\n else:\n dset = self\n\n # Create the new feature\n class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)\n dst_feat = ClassLabel(names=class_names)\n\n def cast_to_class_labels(batch):\n batch[column] = [\n dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None\n for sample in batch[column]\n ]\n return batch\n\n dset = dset.map(\n cast_to_class_labels,\n batched=True,\n desc=\"Casting to class labels\",\n )\n\n new_features = dset.features.copy()\n new_features[column] = dst_feat\n dset = dset.cast(new_features)\n\n return dset\n\n @fingerprint_transform(inplace=False)\n def flatten(self, new_fingerprint, max_depth=16) -> \"Dataset\":\n \"\"\"Flatten the table.\n Each column with a struct type is flattened into one column per struct field.\n Other columns are left unchanged.\n\n Returns:\n :class:`Dataset`: A copy of the dataset with flattened columns.\n \"\"\"\n dataset = copy.deepcopy(self)\n for depth in range(1, max_depth):\n if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):\n dataset._data = dataset._data.flatten()\n else:\n break\n dataset.info.features = 
self.features.flatten(max_depth=max_depth)\n dataset._data = update_metadata_with_features(dataset._data, dataset.features)\n logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else \"unknown\"}.')\n dataset._fingerprint = new_fingerprint\n return dataset\n\n def cast(\n self,\n features: Features,\n batch_size: Optional[int] = 10_000,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = True,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 10_000,\n num_proc: Optional[int] = None,\n ) -> \"Dataset\":\n \"\"\"\n Cast the dataset to a new set of features.\n\n Args:\n features (:class:`datasets.Features`): New features to cast the dataset to.\n The name of the fields in the features must match the current column names.\n The type of the data must also be convertible from one type to the other.\n For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset.\n batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to cast.\n `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to cast.\n keep_in_memory (:obj:`bool`, default ``False``): Whether to copy the data in-memory.\n load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function`\n can be identified, use it instead of recomputing.\n cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n num_proc (`Optional[int]`, default `None`): Number of processes for multiprocessing. 
By default it doesn't\n use multiprocessing.\n\n Returns:\n :class:`Dataset`: A copy of the dataset with casted features.\n \"\"\"\n if sorted(features) != sorted(self._data.column_names):\n raise ValueError(\n f\"The columns in features ({list(features)}) must be identical \"\n f\"as the columns in the dataset: {self._data.column_names}\"\n )\n\n schema = features.arrow_schema\n format = self.format\n dataset = self.with_format(\"arrow\")\n # capture the PyArrow version here to make the lambda serializable on Windows\n dataset = dataset.map(\n partial(table_cast, schema=schema),\n batched=True,\n batch_size=batch_size,\n keep_in_memory=keep_in_memory,\n load_from_cache_file=load_from_cache_file,\n cache_file_name=cache_file_name,\n writer_batch_size=writer_batch_size,\n num_proc=num_proc,\n features=features,\n desc=\"Casting the dataset\",\n )\n dataset = dataset.with_format(**format)\n return dataset\n\n @fingerprint_transform(inplace=False)\n def cast_column(self, column: str, feature: FeatureType, new_fingerprint: str) -> \"Dataset\":\n \"\"\"Cast column to feature for decoding.\n\n Args:\n column (:obj:`str`): Column name.\n feature (:class:`FeatureType`): Target feature.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n if hasattr(feature, \"cast_storage\"):\n dataset = copy.deepcopy(self)\n dataset.features[column] = feature\n dataset._fingerprint = new_fingerprint\n dataset._data = dataset._data.cast(dataset.features.arrow_schema)\n dataset._data = update_metadata_with_features(dataset._data, dataset.features)\n return dataset\n else:\n features = self.features.copy()\n features[column] = feature\n return self.cast(features)\n\n @transmit_tasks\n @fingerprint_transform(inplace=False)\n def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint) -> \"Dataset\":\n \"\"\"\n Remove one or several column(s) in the dataset and the features associated to them.\n\n You can also remove a column using :func:`Dataset.map` with `remove_columns` but the present method\n is in-place (doesn't copy the data to a new dataset) and is thus faster.\n\n Args:\n column_names (:obj:`Union[str, List[str]]`): Name of the column(s) to remove.\n new_fingerprint\n\n Returns:\n :class:`Dataset`: A copy of the dataset object without the columns to remove.\n \"\"\"\n dataset = copy.deepcopy(self)\n if isinstance(column_names, str):\n column_names = [column_names]\n\n for column_name in column_names:\n if column_name not in dataset._data.column_names:\n raise ValueError(\n f\"Column name {column_name} not in the dataset. 
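# Usage sketch for `cast` / `cast_column`. Illustration only: toy columns are assumptions;
# the cast must be expressible as an Arrow type conversion (use `map` for anything else,
# as the docstring above notes).
from datasets import ClassLabel, Dataset, Features, Value

ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]})
new_features = ds.features.copy()
new_features["label"] = ClassLabel(names=["neg", "pos"])   # int64 storage -> ClassLabel
ds = ds.cast(new_features)

scores = Dataset.from_dict({"score": [1, 2, 3]})
scores = scores.cast_column("score", Value("float32"))     # single-column shortcut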
\"\n f\"Current columns in the dataset: {dataset._data.column_names}\"\n )\n\n for column_name in column_names:\n del dataset._info.features[column_name]\n\n dataset._data = dataset._data.drop(column_names)\n dataset._data = update_metadata_with_features(dataset._data, dataset.features)\n dataset._fingerprint = new_fingerprint\n return dataset\n\n @transmit_tasks\n @fingerprint_transform(inplace=False)\n def rename_column(self, original_column_name: str, new_column_name: str, new_fingerprint) -> \"Dataset\":\n \"\"\"\n Rename a column in the dataset, and move the features associated to the original column under the new column\n name.\n\n Args:\n original_column_name (:obj:`str`): Name of the column to rename.\n new_column_name (:obj:`str`): New name for the column.\n new_fingerprint\n\n Returns:\n :class:`Dataset`: A copy of the dataset with a renamed column.\n \"\"\"\n dataset = copy.deepcopy(self)\n if original_column_name not in dataset._data.column_names:\n raise ValueError(\n f\"Original column name {original_column_name} not in the dataset. \"\n f\"Current columns in the dataset: {dataset._data.column_names}\"\n )\n if new_column_name in dataset._data.column_names:\n raise ValueError(\n f\"New column name {original_column_name} already in the dataset. \"\n f\"Please choose a column name which is not already in the dataset. \"\n f\"Current columns in the dataset: {dataset._data.column_names}\"\n )\n if not new_column_name:\n raise ValueError(\"New column name is empty.\")\n\n def rename(columns):\n return [new_column_name if col == original_column_name else col for col in columns]\n\n new_column_names = rename(self._data.column_names)\n if self._format_columns is not None:\n dataset._format_columns = rename(self._format_columns)\n\n dataset._info.features = Features(\n {\n new_column_name if col == original_column_name else col: feature\n for col, feature in self._info.features.items()\n }\n )\n\n dataset._data = dataset._data.rename_columns(new_column_names)\n dataset._data = update_metadata_with_features(dataset._data, dataset.features)\n dataset._fingerprint = new_fingerprint\n return dataset\n\n @transmit_tasks\n @fingerprint_transform(inplace=False)\n def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint) -> \"Dataset\":\n \"\"\"\n Rename several columns in the dataset, and move the features associated to the original columns under\n the new column names.\n\n Args:\n column_mapping (:obj:`Dict[str, str]`): A mapping of columns to rename to their new names\n\n Returns:\n :class:`Dataset`: A copy of the dataset with renamed columns\n \"\"\"\n dataset = copy.deepcopy(self)\n\n extra_columns = set(column_mapping.keys()) - set(dataset.column_names)\n if extra_columns:\n raise ValueError(\n f\"Original column names {extra_columns} not in the dataset. 
\"\n f\"Current columns in the dataset: {dataset._data.column_names}\"\n )\n\n number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))\n if number_of_duplicates_in_new_columns != 0:\n raise ValueError(\n \"New column names must all be different, but this column mapping \"\n f\"has {number_of_duplicates_in_new_columns} duplicates\"\n )\n\n empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]\n if empty_new_columns:\n raise ValueError(f\"New column names {empty_new_columns} are empty.\")\n\n def rename(columns):\n return [column_mapping[col] if col in column_mapping else col for col in columns]\n\n new_column_names = rename(self._data.column_names)\n if self._format_columns is not None:\n dataset._format_columns = rename(self._format_columns)\n\n dataset._info.features = Features(\n {\n column_mapping[col] if col in column_mapping else col: feature\n for col, feature in (self._info.features or {}).items()\n }\n )\n\n dataset._data = dataset._data.rename_columns(new_column_names)\n dataset._data = update_metadata_with_features(dataset._data, dataset.features)\n dataset._fingerprint = new_fingerprint\n return dataset\n\n def __len__(self):\n \"\"\"Number of rows in the dataset.\"\"\"\n return self.num_rows\n\n def _iter(self, decoded: bool = True):\n \"\"\"Iterate through the examples.\n\n If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the\n selected format.\n \"\"\"\n for index in range(self.num_rows):\n yield self._getitem(\n index,\n decoded=decoded,\n )\n\n def __iter__(self):\n \"\"\"Iterate through the examples.\n\n If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the\n selected format.\n \"\"\"\n return self._iter()\n\n def __repr__(self):\n return f\"Dataset({{\\n features: {list(self.features.keys())},\\n num_rows: {self.num_rows}\\n}})\"\n\n @property\n def format(self):\n return {\n \"type\": self._format_type,\n \"format_kwargs\": self._format_kwargs,\n \"columns\": self.column_names if self._format_columns is None else self._format_columns,\n \"output_all_columns\": self._output_all_columns,\n }\n\n @contextlib.contextmanager\n def formatted_as(\n self,\n type: Optional[str] = None,\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n **format_kwargs,\n ):\n \"\"\"To be used in a `with` statement. 
Set __getitem__ return format (type and columns).\n\n Args:\n type (Optional ``str``): output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow']\n None means __getitem__ returns python objects (default)\n columns (Optional ``List[str]``): columns to format in the output\n None means __getitem__ returns all columns (default)\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.\n \"\"\"\n old_format_type = self._format_type\n old_format_kwargs = self._format_kwargs\n old_format_columns = self._format_columns\n old_output_all_columns = self._output_all_columns\n try:\n self.set_format(type, columns, output_all_columns, **format_kwargs)\n yield\n finally:\n self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)\n\n @fingerprint_transform(inplace=True)\n def set_format(\n self,\n type: Optional[str] = None,\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n **format_kwargs,\n ):\n \"\"\"Set __getitem__ return format (type and columns). The data formatting is applied on-the-fly.\n The format ``type`` (for example \"numpy\") is used to format batches when using __getitem__.\n It's also possible to use custom transforms for formatting using :func:`datasets.Dataset.set_transform`.\n\n Args:\n type (Optional ``str``):\n Either output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow'].\n None means __getitem__ returns python objects (default)\n columns (Optional ``List[str]``): columns to format in the output.\n None means __getitem__ returns all columns (default).\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.\n\n It is possible to call ``map`` after calling ``set_format``. Since ``map`` may add new columns, then the list of formatted columns\n gets updated. In this case, if you apply ``map`` on a dataset to add a new column, then this column will be formatted:\n\n new formatted columns = (all columns - previously unformatted columns)\n\n \"\"\"\n format_kwargs.update(format_kwargs.pop(\"format_kwargs\", {})) # allow to use self.set_format(self.format)\n\n # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter\n type = get_format_type_from_alias(type)\n _ = get_formatter(type, features=self.features, **format_kwargs)\n\n # Check filter column\n if isinstance(columns, str):\n columns = [columns]\n if isinstance(columns, tuple):\n columns = list(columns)\n if columns is not None and any(col not in self._data.column_names for col in columns):\n raise ValueError(\n f\"Columns {list(filter(lambda col: col not in self._data.column_names, columns))} not in the dataset. 
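# Usage sketch for `remove_columns`, `rename_column` and `rename_columns` (defined earlier
# in this class). Illustration only: the toy column names are assumptions.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "meta": [1, 2], "label": [0, 1]})
ds = ds.remove_columns("meta")                        # accepts a single name or a list
ds = ds.rename_column("label", "target")              # one column at a time
ds = ds.rename_columns({"text": "sentence", "target": "label"})  # mapping of old -> new
print(ds.column_names)                                # e.g. ['sentence', 'label']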
Current columns in the dataset: {self._data.column_names}\"\n )\n if columns is not None:\n columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs\n\n self._format_type = type\n self._format_kwargs = format_kwargs\n self._format_columns = columns\n self._output_all_columns = output_all_columns\n logger.debug(\n \"Set __getitem__(key) output type to %s for %s columns \"\n \" (when key is int or slice) and %s output other (un-formatted) columns.\",\n \"python objects\" if type is None else type,\n \"no\" if columns is None else str(columns),\n \"do\" if output_all_columns else \"don't\",\n )\n\n def reset_format(self):\n \"\"\"Reset __getitem__ return format to python objects and all columns.\n\n Same as ``self.set_format()``\n \"\"\"\n self.set_format()\n\n def set_transform(\n self,\n transform: Optional[Callable],\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n ):\n \"\"\"Set __getitem__ return format using this transform. The transform is applied on-the-fly on batches when __getitem__ is called.\n As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`\n\n Args:\n transform (Optional ``Callable``): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`\n A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.\n This function is applied right before returning the objects in __getitem__.\n columns (Optional ``List[str]``): columns to format in the output\n If specified, then the input batch of the transform only contains those columns.\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n If set to True, then the other un-formatted columns are kept with the output of the transform.\n\n \"\"\"\n self.set_format(\"custom\", columns=columns, output_all_columns=output_all_columns, transform=transform)\n\n def with_format(\n self,\n type: Optional[str] = None,\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n **format_kwargs,\n ):\n \"\"\"Set __getitem__ return format (type and columns). The data formatting is applied on-the-fly.\n The format ``type`` (for example \"numpy\") is used to format batches when using __getitem__.\n\n It's also possible to use custom transforms for formatting using :func:`datasets.Dataset.with_transform`.\n\n Contrary to :func:`datasets.Dataset.set_format`, ``with_format`` returns a new Dataset object.\n\n Args:\n type (Optional ``str``):\n Either output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow'].\n None means __getitem__ returns python objects (default)\n columns (Optional ``List[str]``): columns to format in the output\n None means __getitem__ returns all columns (default)\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.\n \"\"\"\n dataset = copy.deepcopy(self)\n dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)\n return dataset\n\n def with_transform(\n self,\n transform: Optional[Callable],\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n ):\n \"\"\"Set __getitem__ return format using this transform. 
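# Usage sketch for the formatting API (`set_format`, `formatted_as`, `with_format`,
# `reset_format`). Illustration only: "numpy" is used because it needs no extra install;
# "torch"/"tensorflow"/"pandas" work the same way when those libraries are available.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})
ds.set_format(type="numpy", columns=["x"], output_all_columns=True)
print(type(ds[0]["x"]))            # numpy.ndarray; "y" stays a plain Python object
ds.reset_format()                  # back to Python objects for all columns

with ds.formatted_as(type="numpy", columns=["y"]):
    print(ds[:2]["y"])             # formatted only inside the `with` block

ds_pandas = ds.with_format("pandas")   # like set_format, but returns a new Dataset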
The transform is applied on-the-fly on batches when __getitem__ is called.\n\n As :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`.\n\n Contrary to :func:`datasets.Dataset.set_transform`, ``with_transform`` returns a new Dataset object.\n\n Args:\n transform (Optional ``Callable``): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`\n A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.\n This function is applied right before returning the objects in __getitem__.\n columns (Optional ``List[str]``): columns to format in the output\n If specified, then the input batch of the transform only contains those columns.\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n If set to True, then the other un-formatted columns are kept with the output of the transform.\n\n \"\"\"\n dataset = copy.deepcopy(self)\n dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)\n return dataset\n\n def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> \"Dataset\":\n \"\"\"Prepare a dataset for the given task by casting the dataset's :class:`Features` to standardized column names and types as detailed in :py:mod:`datasets.tasks`.\n\n Casts :attr:`datasets.DatasetInfo.features` according to a task-specific schema. Intended for single-use only, so all task templates are removed from :attr:`datasets.DatasetInfo.task_templates` after casting.\n\n Args:\n task (:obj:`Union[str, TaskTemplate]`): The task to prepare the dataset for during training and evaluation. If :obj:`str`, supported tasks include:\n\n - :obj:`\"text-classification\"`\n - :obj:`\"question-answering\"`\n\n If :obj:`TaskTemplate`, must be one of the task templates in :py:mod:`datasets.tasks`.\n id (:obj:`int`, default `0`): The id required to unambiguously identify the task template when multiple task templates of the same type are supported.\n \"\"\"\n # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD\n if isinstance(task, str):\n tasks = [template.task for template in (self.info.task_templates or [])]\n compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task]\n if not compatible_templates:\n raise ValueError(\n f\"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}\"\n )\n\n if not 0 <= id < len(compatible_templates):\n templates_list_str = \"\\n\".join(\n f\"- `{idx}` for task {template}\" for idx, template in enumerate(compatible_templates)\n )\n raise ValueError(\n f\"Id {id} for task {task} is not in a valid range. 
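# Usage sketch for `set_transform` / `with_transform`. Illustration only: the transform
# below is a toy; a typical real transform would run a tokenizer on the batch.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello world", "hi"]})

def upper_case(batch):
    # A transform always receives a batch (dict of lists) and must return one.
    return {"text": [t.upper() for t in batch["text"]]}

ds.set_transform(upper_case)            # applied lazily, at __getitem__ time
print(ds[0]["text"])                    # e.g. 'HELLO WORLD'
ds_new = ds.with_transform(upper_case)  # same, but returns a new Dataset object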
Supported ids:\\n{templates_list_str}\"\n )\n template = compatible_templates[id]\n elif isinstance(task, TaskTemplate):\n template = task\n else:\n raise ValueError(\n f\"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}.\"\n )\n template = template.align_with_features(self.info.features)\n column_mapping = template.column_mapping\n columns_to_drop = [column for column in self.column_names if column not in column_mapping]\n dataset = self.remove_columns(columns_to_drop)\n dataset = dataset.rename_columns(column_mapping)\n # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`\n dataset.info.task_templates = None\n dataset = dataset.cast(features=template.features)\n return dataset\n\n def _getitem(self, key: Union[int, slice, str], decoded: bool = True, **kwargs) -> Union[Dict, List]:\n \"\"\"\n Can be used to index columns (by string names) or rows (by integer index, slices, or iter of indices or bools)\n \"\"\"\n format_type = kwargs[\"format_type\"] if \"format_type\" in kwargs else self._format_type\n format_columns = kwargs[\"format_columns\"] if \"format_columns\" in kwargs else self._format_columns\n output_all_columns = (\n kwargs[\"output_all_columns\"] if \"output_all_columns\" in kwargs else self._output_all_columns\n )\n format_kwargs = kwargs[\"format_kwargs\"] if \"format_kwargs\" in kwargs else self._format_kwargs\n format_kwargs = format_kwargs if format_kwargs is not None else {}\n formatter = get_formatter(format_type, features=self.features, decoded=decoded, **format_kwargs)\n pa_subtable = query_table(self._data, key, indices=self._indices if self._indices is not None else None)\n formatted_output = format_table(\n pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns\n )\n return formatted_output\n\n @overload\n def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811\n ...\n\n @overload\n def __getitem__(self, key: str) -> List: # noqa: F811\n ...\n\n def __getitem__(self, key): # noqa: F811\n \"\"\"Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools).\"\"\"\n return self._getitem(\n key,\n )\n\n def cleanup_cache_files(self) -> int:\n \"\"\"Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is\n one.\n\n Be careful when running this command that no other process is currently using other cache files.\n\n Returns:\n :obj:`int`: Number of removed files.\n \"\"\"\n current_cache_files = [os.path.abspath(cache_file[\"filename\"]) for cache_file in self.cache_files]\n if not current_cache_files:\n return 0\n cache_directory = os.path.dirname(current_cache_files[0])\n logger.info(f\"Listing files in {cache_directory}\")\n files: List[str] = os.listdir(cache_directory)\n files_to_remove = []\n for f_name in files:\n full_name = os.path.abspath(os.path.join(cache_directory, f_name))\n if f_name.startswith(\"cache-\") and f_name.endswith(\".arrow\"):\n if full_name in current_cache_files:\n logger.info(f\"Keeping currently used cache file at {full_name}\")\n continue\n files_to_remove.append(full_name)\n for file_path in files_to_remove:\n logger.info(f\"Removing {file_path}\")\n os.remove(file_path)\n return len(files_to_remove)\n\n def _get_cache_file_path(self, fingerprint):\n if is_caching_enabled() and self.cache_files:\n cache_file_name = \"cache-\" + fingerprint + \".arrow\"\n 
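# Usage sketch for `__getitem__` and `cleanup_cache_files`. Illustration only.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
row = ds[0]              # dict for one example, e.g. {'text': 'a', 'label': 0}
batch = ds[:2]           # dict of lists for a slice
column = ds["label"]     # full column as a list, e.g. [0, 1, 0]
removed = ds.cleanup_cache_files()   # 0 here: an in-memory dataset has no cache files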
cache_directory = os.path.dirname(self.cache_files[0][\"filename\"])\n else:\n cache_file_name = \"cache-\" + generate_random_fingerprint() + \".arrow\"\n cache_directory = get_temporary_cache_files_directory()\n cache_file_path = os.path.join(cache_directory, cache_file_name)\n return cache_file_path\n\n def map(\n self,\n function: Optional[Callable] = None,\n with_indices: bool = False,\n with_rank: bool = False,\n input_columns: Optional[Union[str, List[str]]] = None,\n batched: bool = False,\n batch_size: Optional[int] = 1000,\n drop_last_batch: bool = False,\n remove_columns: Optional[Union[str, List[str]]] = None,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = None,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n features: Optional[Features] = None,\n disable_nullable: bool = False,\n fn_kwargs: Optional[dict] = None,\n num_proc: Optional[int] = None,\n suffix_template: str = \"_{rank:05d}_of_{num_proc:05d}\",\n new_fingerprint: Optional[str] = None,\n desc: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Apply a function to all the elements in the table (individually or in batches)\n and update the table (if function does update examples).\n\n Args:\n function (:obj:`Callable`): Function with one of the following signatures:\n\n - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`\n - `function(example: Union[Dict, Any], *extra_args) -> Union[Dict, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)\n - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False` and `with_rank=False`\n - `function(batch: Union[Dict[List], List[Any]], *extra_args) -> Union[Dict, Any]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)\n\n If no function is provided, default to identity function: ``lambda x: x``.\n with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the\n signature of `function` should be `def function(example, idx[, rank]): ...`.\n with_rank (:obj:`bool`, default `False`): Provide process rank to `function`. Note that in this case the\n signature of `function` should be `def function(example[, idx], rank): ...`.\n input_columns (`Optional[Union[str, List[str]]]`, default `None`): The columns to be passed into `function`\n as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.\n batched (:obj:`bool`, default `False`): Provide batch of examples to `function`.\n batch_size (`Optional[int]`, default `1000`): Number of examples per batch provided to `function` if `batched=True`\n `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`.\n drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be\n dropped instead of being processed by the function.\n remove_columns (`Optional[Union[str, List[str]]]`, default `None`): Remove a selection of columns while doing the mapping.\n Columns will be removed before updating the examples with the output of `function`, i.e. 
if `function` is adding\n columns with names in `remove_columns`, these columns will be kept.\n keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, default `True` if caching is enabled): If a cache file storing the current computation from `function`\n can be identified, use it instead of recomputing.\n cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n features (`Optional[datasets.Features]`, default `None`): Use a specific Features to store the cache file\n instead of the automatically generated one.\n disable_nullable (:obj:`bool`, default `False`): Disallow null values in the table.\n fn_kwargs (`Optional[Dict]`, default `None`): Keyword arguments to be passed to `function`.\n num_proc (`Optional[int]`, default `None`): Max number of processes when generating cache. Already cached shards are loaded sequentially\n suffix_template (:obj:`str`):\n If cache_file_name is specified, then this suffix\n will be added at the end of the base name of each: defaults to \"_{rank:05d}_of_{num_proc:05d}\". For example, if cache_file_name is \"processed.arrow\", then for\n rank=1 and num_proc=4, the resulting file would be \"processed_00001_of_00004.arrow\" for the default suffix.\n new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.\n desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples.\n \"\"\"\n if keep_in_memory and cache_file_name is not None:\n raise ValueError(\"Please use either `keep_in_memory` or `cache_file_name` but not both.\")\n\n if num_proc is not None and num_proc <= 0:\n raise ValueError(\"num_proc must be an integer > 0.\")\n\n # If the array is empty we do nothing\n if len(self) == 0:\n return self\n\n if function is None:\n function = lambda x: x # noqa: E731\n\n def decorate(f):\n \"\"\"\n Decorate the mapped function, so that its first argument is wrapped with a LazyDict to be used internally\n but a standard dictionary is returned at the end of the mapping.\n \"\"\"\n\n @wraps(f)\n def decorated(item, *args, **kwargs):\n # Decorate first arg with LazyDict (either Example or Batch)\n decorated_item = (\n Example(item, features=self.features) if not batched else Batch(item, features=self.features)\n )\n # Use the LazyDict internally, while mapping the function\n result = f(decorated_item, *args, **kwargs)\n # Return a standard dict\n return result.data if isinstance(result, LazyDict) else result\n\n return decorated\n\n function = decorate(function) if not self._format_type and not input_columns else function\n\n if isinstance(input_columns, str):\n input_columns = [input_columns]\n\n if input_columns is not None:\n for input_column in input_columns:\n if input_column not in self._data.column_names:\n raise ValueError(\n f\"Input column {input_column} not 
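# Usage sketch for single-process `map`, unbatched and batched. Illustration only;
# the added "n_chars" column is an assumption for the example.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"]})
ds1 = ds.map(lambda example: {"n_chars": len(example["text"])})   # one example at a time
ds2 = ds.map(
    lambda batch: {"n_chars": [len(t) for t in batch["text"]]},   # whole batches
    batched=True,
    batch_size=1000,
)
print(ds1["n_chars"], ds2["n_chars"])   # [5, 5] [5, 5]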
in the dataset. Current columns in the dataset: {self._data.column_names}\"\n )\n\n if isinstance(remove_columns, str):\n remove_columns = [remove_columns]\n\n if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns):\n raise ValueError(\n f\"Column to remove {list(filter(lambda col: col not in self._data.column_names, remove_columns))} not in the dataset. Current columns in the dataset: {self._data.column_names}\"\n )\n\n load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()\n\n if fn_kwargs is None:\n fn_kwargs = {}\n\n if num_proc is not None and num_proc > len(self):\n num_proc = len(self)\n logger.warning(\n f\"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}.\"\n )\n\n disable_tqdm = not utils.is_progress_bar_enabled()\n\n if num_proc is None or num_proc == 1:\n return self._map_single(\n function=function,\n with_indices=with_indices,\n with_rank=with_rank,\n input_columns=input_columns,\n batched=batched,\n batch_size=batch_size,\n drop_last_batch=drop_last_batch,\n remove_columns=remove_columns,\n keep_in_memory=keep_in_memory,\n load_from_cache_file=load_from_cache_file,\n cache_file_name=cache_file_name,\n writer_batch_size=writer_batch_size,\n features=features,\n disable_nullable=disable_nullable,\n fn_kwargs=fn_kwargs,\n new_fingerprint=new_fingerprint,\n disable_tqdm=disable_tqdm,\n desc=desc,\n )\n else:\n\n def format_cache_file_name(cache_file_name, rank):\n sep = cache_file_name.rindex(\".\")\n base_name, extension = cache_file_name[:sep], cache_file_name[sep:]\n cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension\n logger.info(f\"Process #{rank} will write at {cache_file_name}\")\n return cache_file_name\n\n def format_new_fingerprint(new_fingerprint, rank):\n return new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)\n\n prev_env = deepcopy(os.environ)\n # check if parallelism if off\n # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22\n if prev_env.get(\"TOKENIZERS_PARALLELISM\", \"false\").lower() not in (\n \"\",\n \"off\",\n \"false\",\n \"f\",\n \"no\",\n \"n\",\n \"0\",\n ):\n logger.warning(\"Setting TOKENIZERS_PARALLELISM=false for forked processes.\")\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n initargs, initializer = None, None\n if not disable_tqdm:\n initargs, initializer = (RLock(),), tqdm.set_lock\n\n shards = [\n self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)\n for rank in range(num_proc)\n ]\n kwds_per_shard = [\n dict(\n self=shards[rank],\n function=function,\n with_indices=with_indices,\n with_rank=with_rank,\n input_columns=input_columns,\n batched=batched,\n batch_size=batch_size,\n drop_last_batch=drop_last_batch,\n remove_columns=remove_columns,\n keep_in_memory=keep_in_memory,\n load_from_cache_file=load_from_cache_file,\n cache_file_name=format_cache_file_name(cache_file_name, rank)\n if cache_file_name is not None\n else None,\n writer_batch_size=writer_batch_size,\n features=features.copy() if features is not None else None,\n disable_nullable=disable_nullable,\n fn_kwargs=fn_kwargs,\n rank=rank,\n offset=sum(len(s) for s in shards[:rank]),\n disable_tqdm=disable_tqdm,\n new_fingerprint=format_new_fingerprint(new_fingerprint, rank)\n if new_fingerprint is not None\n else None,\n desc=desc,\n )\n for rank 
in range(num_proc)\n ]\n\n # We search for already cached shards\n def catch_non_existent_error(func, kwargs):\n try:\n return func(**kwargs)\n except NonExistentDatasetError:\n return None\n\n transformed_shards = [\n catch_non_existent_error(self.__class__._map_single, dict(cache_only=True, **kwds))\n for kwds in kwds_per_shard\n ]\n\n # We try to create a pool with as many workers as dataset not yet cached.\n nb_of_missing_shards = transformed_shards.count(None)\n if nb_of_missing_shards > 0:\n with Pool(nb_of_missing_shards, initargs=initargs, initializer=initializer) as pool:\n os.environ = prev_env\n logger.info(f\"Spawning {num_proc} processes\")\n results = {\n i: pool.apply_async(self.__class__._map_single, kwds=kwds)\n for i, (kwds, cached_shard) in enumerate(zip(kwds_per_shard, transformed_shards))\n if cached_shard is None\n }\n assert (\n len(results) == nb_of_missing_shards\n ), \"The number of missing cached shards needs to correspond to the number of `_map_single` we're running\"\n\n for index, async_result in results.items():\n transformed_shards[index] = async_result.get()\n\n assert (\n transformed_shards.count(None) == 0\n ), \"All shards have to be defined Datasets, none should still be missing.\"\n\n logger.info(f\"Concatenating {num_proc} shards\")\n result = concatenate_datasets(transformed_shards)\n if new_fingerprint is not None:\n result._fingerprint = new_fingerprint\n return result\n\n @transmit_tasks\n @transmit_format\n @fingerprint_transform(\n inplace=False, ignore_kwargs=[\"load_from_cache_file\", \"cache_file_name\", \"disable_tqdm\", \"desc\", \"cache_only\"]\n )\n def _map_single(\n self,\n function: Optional[Callable] = None,\n with_indices: bool = False,\n with_rank: bool = False,\n input_columns: Optional[List[str]] = None,\n batched: bool = False,\n batch_size: Optional[int] = 1000,\n drop_last_batch: bool = False,\n remove_columns: Optional[List[str]] = None,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = None,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n features: Optional[Features] = None,\n disable_nullable: bool = False,\n fn_kwargs: Optional[dict] = None,\n new_fingerprint: Optional[str] = None,\n rank: Optional[int] = None,\n offset: int = 0,\n disable_tqdm: bool = False,\n desc: Optional[str] = None,\n cache_only: bool = False,\n ) -> \"Dataset\":\n \"\"\"Apply a function to all the elements in the table (individually or in batches)\n and update the table (if function does update examples).\n\n Args:\n function (:obj:`Callable`): with one of the following signature:\n - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`\n - `function(example: Union[Dict, Any], *extra_args) -> Union[Dict, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)\n - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False` and `with_rank=False`\n - `function(batch: Union[Dict[List], List[Any]], *extra_args) -> Union[Dict, Any]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)\n If no function is provided, default to identity function: lambda x: x\n with_indices (:obj:`bool`, defaults to `False`): Provide example indices to `function`. 
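# Usage sketch for `with_indices` and multiprocessing: with `num_proc > 1` the dataset is
# sharded, each shard is handled by one of the `_map_single` calls prepared above, and the
# results are concatenated back. Illustration only; on spawn-based platforms (e.g. Windows)
# multiprocessing code should live under an `if __name__ == "__main__":` guard.
from datasets import Dataset

def add_global_index(example, idx):
    return {"idx": idx}          # merged into the example; existing columns are kept

ds = Dataset.from_dict({"text": ["a", "b", "c", "d"]})
ds = ds.map(add_global_index, with_indices=True, num_proc=2)
print(ds["idx"])                 # [0, 1, 2, 3] -- indices are offset per shard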
Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.\n with_rank (:obj:`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.\n input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as\n positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.\n batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function`\n batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`\n `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`\n drop_last_batch (:obj:`bool`, default: `False`): Whether a last batch smaller than the batch_size should be\n dropped instead of being processed by the function.\n remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.\n Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding\n columns with names in `remove_columns`, these columns will be kept.\n keep_in_memory (:obj:`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, defaults to `True` if caching is enabled): If a cache file storing the current computation from `function`\n can be identified, use it instead of recomputing.\n cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a path for the cache file. It is used to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file\n instead of the automatically generated one.\n disable_nullable (:obj:`bool`, defaults to `False`): Disallow null values in the table.\n fn_kwargs (`Optional[Dict]`, defaults to `None`): Keyword arguments to be passed to `function`\n new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n rank: (`Optional[int]`, defaults to `None`): If specified, this is the process rank when doing multiprocessing\n offset: (:obj:`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.\n disable_tqdm (:obj:`bool`, defaults to `False`): Whether to silence tqdm's output.\n desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while mapping examples.\n cache_only (`bool`, defaults to `False`): Flag in order to notifiy the method will either find a cached dataset or raise `NonExistentDatasetError` exception,\n \"\"\"\n # Reduce logging to keep things readable in multiprocessing with tqdm\n if rank is not None and logging.get_verbosity() < logging.WARNING:\n logging.set_verbosity_warning()\n # Print at least one thing to 
fix tqdm in notebooks in multiprocessing\n # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308\n if rank is not None and not disable_tqdm and any(\"notebook\" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):\n print(\" \", end=\"\", flush=True)\n\n if fn_kwargs is None:\n fn_kwargs = {}\n\n # If we do batch computation but no batch size is provided, default to the full dataset\n if batched and (batch_size is None or batch_size <= 0):\n batch_size = self.num_rows\n\n # Check if we've already cached this computation (indexed by a hash)\n if self.cache_files:\n if cache_file_name is None:\n # we create a unique hash from the function,\n # current dataset file and the mapping args\n cache_file_name = self._get_cache_file_path(new_fingerprint)\n if os.path.exists(cache_file_name) and load_from_cache_file:\n logger.warning(f\"Loading cached processed dataset at {cache_file_name}\")\n info = self.info.copy()\n info.features = features\n info.task_templates = None\n return Dataset.from_file(cache_file_name, info=info, split=self.split)\n\n # Raise an error if we were supposed to return a cached dataset and none was found\n if cache_only:\n raise NonExistentDatasetError\n\n # We set this variable to True after processing the first example/batch in\n # `apply_function_on_filtered_inputs` if the map function returns a dict.\n # If set to False, no new arrow table will be created\n update_data = None\n\n class NumExamplesMismatchError(Exception):\n pass\n\n def validate_function_output(processed_inputs, indices):\n \"\"\"Validate output of the map function.\"\"\"\n if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table)):\n raise TypeError(\n f\"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects.\"\n )\n elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):\n allowed_batch_return_types = (list, np.ndarray)\n all_dict_values_are_lists = all(\n isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()\n )\n if all_dict_values_are_lists is False:\n raise TypeError(\n f\"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. 
When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`.\"\n )\n\n def apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples=False, offset=0):\n \"\"\"Utility to apply the function on a selection of columns.\"\"\"\n nonlocal update_data\n fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]\n if offset == 0:\n effective_indices = indices\n else:\n effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset\n additional_args = ()\n if with_indices:\n additional_args += (effective_indices,)\n if with_rank:\n additional_args += (rank,)\n processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)\n if update_data is None:\n # Check if the function returns updated examples\n update_data = isinstance(processed_inputs, (Mapping, pa.Table))\n validate_function_output(processed_inputs, indices)\n if not update_data:\n return None # Nothing to update, let's move on\n if self._format_type is not None:\n inputs = self._getitem(\n key=(indices if isinstance(indices, int) else slice(indices[0], indices[-1] + 1)),\n format_type=None,\n format_columns=None,\n format_kwargs=None,\n decoded=False,\n )\n if remove_columns is not None:\n for column in remove_columns:\n inputs.pop(column)\n if check_same_num_examples:\n input_num_examples = len(inputs[next(iter(inputs.keys()))])\n processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])\n if input_num_examples != processed_inputs_num_examples:\n raise NumExamplesMismatchError()\n if isinstance(inputs, dict) and isinstance(processed_inputs, Mapping):\n inputs.update(processed_inputs)\n return inputs\n else:\n return processed_inputs\n\n def init_buffer_and_writer():\n # Prepare output buffer and batched writer in memory or on file if we update the table\n writer_features = features\n if writer_features is None:\n writer_features = self.features\n update_features = True\n else:\n update_features = False\n if keep_in_memory or cache_file_name is None:\n buf_writer = pa.BufferOutputStream()\n tmp_file = None\n writer = ArrowWriter(\n features=writer_features,\n stream=buf_writer,\n writer_batch_size=writer_batch_size,\n update_features=update_features,\n fingerprint=new_fingerprint,\n disable_nullable=disable_nullable,\n )\n else:\n buf_writer = None\n logger.info(f\"Caching processed dataset at {cache_file_name}\")\n tmp_file = tempfile.NamedTemporaryFile(\"wb\", dir=os.path.dirname(cache_file_name), delete=False)\n writer = ArrowWriter(\n features=writer_features,\n path=tmp_file.name,\n writer_batch_size=writer_batch_size,\n update_features=update_features,\n fingerprint=new_fingerprint,\n disable_nullable=disable_nullable,\n )\n return buf_writer, writer, tmp_file\n\n # If `update_data` is True after processing the first example/batch, initalize these resources with `init_buffer_and_writer`\n buf_writer, writer, tmp_file = None, None, None\n\n # Optionally initialize the writer as a context manager\n with contextlib.ExitStack() as stack:\n try:\n # Only load the columns we actually need\n if input_columns:\n input_dataset = self.with_format(\n self._format_type, columns=input_columns, output_all_columns=False, **self._format_kwargs\n )\n if remove_columns:\n remove_columns = list(set(remove_columns) & set(input_columns))\n else:\n input_dataset = self\n\n # Loop over single examples or batches and write to buffer/file if examples are to be updated\n if not 
batched:\n pbar_iterable = input_dataset._iter(decoded=False)\n pbar_total = len(input_dataset)\n else:\n num_rows = (\n len(input_dataset) if not drop_last_batch else len(input_dataset) // batch_size * batch_size\n )\n pbar_iterable = range(0, num_rows, batch_size)\n pbar_total = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size\n pbar_unit = \"ex\" if not batched else \"ba\"\n pbar_desc = (desc + \" \" if desc is not None else \"\") + \"#\" + str(rank) if rank is not None else desc\n pbar = utils.tqdm(\n pbar_iterable,\n total=pbar_total,\n disable=disable_tqdm,\n position=rank,\n unit=pbar_unit,\n desc=pbar_desc,\n )\n if not batched:\n for i, example in enumerate(pbar):\n example = apply_function_on_filtered_inputs(example, i, offset=offset)\n if update_data:\n if i == 0:\n buf_writer, writer, tmp_file = init_buffer_and_writer()\n stack.enter_context(writer)\n if isinstance(example, pa.Table):\n writer.write_row(example)\n else:\n writer.write(example)\n else:\n for i in pbar:\n batch = input_dataset._getitem(\n slice(i, i + batch_size),\n decoded=False,\n )\n indices = list(\n range(*(slice(i, i + batch_size).indices(input_dataset.num_rows)))\n ) # Something simpler?\n try:\n batch = apply_function_on_filtered_inputs(\n batch,\n indices,\n check_same_num_examples=len(input_dataset.list_indexes()) > 0,\n offset=offset,\n )\n except NumExamplesMismatchError:\n raise DatasetTransformationNotAllowedError(\n \"Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it.\"\n ) from None\n if update_data:\n if i == 0:\n buf_writer, writer, tmp_file = init_buffer_and_writer()\n stack.enter_context(writer)\n if isinstance(batch, pa.Table):\n writer.write_table(batch)\n else:\n writer.write_batch(batch)\n if update_data and writer is not None:\n writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file\n except (Exception, KeyboardInterrupt):\n if update_data:\n if writer is not None:\n writer.finalize()\n if tmp_file is not None:\n tmp_file.close()\n if os.path.exists(tmp_file.name):\n os.remove(tmp_file.name)\n raise\n\n if update_data and tmp_file is not None:\n tmp_file.close()\n shutil.move(tmp_file.name, cache_file_name)\n umask = os.umask(0o666)\n os.umask(umask)\n os.chmod(cache_file_name, 0o666 & ~umask)\n\n if update_data:\n # Create new Dataset from buffer or file\n info = self.info.copy()\n info.features = writer._features\n info.task_templates = None\n if buf_writer is None:\n return Dataset.from_file(cache_file_name, info=info, split=self.split)\n else:\n return Dataset.from_buffer(buf_writer.getvalue(), info=info, split=self.split)\n else:\n return self\n\n @transmit_format\n @fingerprint_transform(\n inplace=False, ignore_kwargs=[\"load_from_cache_file\", \"cache_file_name\", \"desc\"], version=\"2.0.1\"\n )\n def filter(\n self,\n function: Optional[Callable] = None,\n with_indices=False,\n input_columns: Optional[Union[str, List[str]]] = None,\n batched: bool = False,\n batch_size: Optional[int] = 1000,\n remove_columns: Optional[List[str]] = None,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = True,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n fn_kwargs: Optional[dict] = None,\n num_proc: Optional[int] = None,\n suffix_template: str = \"_{rank:05d}_of_{num_proc:05d}\",\n new_fingerprint: Optional[str] = 
None,\n desc: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Apply a filter function to all the elements in the table in batches\n and update the table so that the dataset only includes examples according to the filter function.\n\n Args:\n function (:obj:`Callable`): Callable with one of the following signatures:\n\n - ``function(example: Union[Dict, Any]) -> bool`` if ``with_indices=False, batched=False``\n - ``function(example: Union[Dict, Any], indices: int) -> bool`` if ``with_indices=True, batched=False``\n - ``function(example: Union[Dict, Any]) -> List[bool]`` if ``with_indices=False, batched=True``\n - ``function(example: Union[Dict, Any], indices: int) -> List[bool]`` if ``with_indices=True, batched=True``\n\n If no function is provided, defaults to an always True function: ``lambda x: True``.\n with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.\n input_columns (:obj:`str` or `List[str]`, optional): The columns to be passed into `function` as\n positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.\n batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function`\n batch_size (:obj:`int`, optional, default `1000`): Number of examples per batch provided to `function` if\n ``batched = True``. If ``batched = False``, one example per batch is passed to ``function``.\n If ``batch_size <= 0`` or ``batch_size == None``: provide the full dataset as a single batch to `function`\n keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the current computation from `function`\n can be identified, use it instead of recomputing.\n cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n fn_kwargs (:obj:`dict`, optional): Keyword arguments to be passed to `function`\n num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't\n use multiprocessing.\n suffix_template (:obj:`str`):\n If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each.\n For example, if `cache_file_name` is `\"processed.arrow\"`, then for ``rank = 1`` and ``num_proc = 4``,\n the resulting file would be `\"processed_00001_of_00004.arrow\"` for the default suffix (default\n `_{rank:05d}_of_{num_proc:05d}`)\n new_fingerprint (:obj:`str`, optional): The new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.\n desc (`Optional[str]`, defaults to `None`): Meaningful description to be displayed alongside with the progress bar while filtering examples.\n \"\"\"\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.filter` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.`\"\n )\n\n if function is None:\n function = lambda x: True # noqa: E731\n\n if remove_columns is not None:\n raise ValueError(\"Parameter `remove_columns` passed to .filter() is no longer supported.\")\n\n indices = self.map(\n function=partial(\n get_indices_from_mask_function, function, batched, with_indices, input_columns, self._indices\n ),\n with_indices=True,\n features=Features({\"indices\": Value(\"uint64\")}),\n batched=True,\n batch_size=batch_size,\n remove_columns=self.column_names,\n keep_in_memory=keep_in_memory,\n load_from_cache_file=load_from_cache_file,\n cache_file_name=cache_file_name,\n writer_batch_size=writer_batch_size,\n fn_kwargs=fn_kwargs,\n num_proc=num_proc,\n suffix_template=suffix_template,\n new_fingerprint=new_fingerprint,\n input_columns=input_columns,\n desc=desc,\n )\n new_dataset = copy.deepcopy(self)\n new_dataset._indices = indices.data\n new_dataset._fingerprint = new_fingerprint\n return new_dataset\n\n @transmit_format\n @fingerprint_transform(inplace=False, ignore_kwargs=[\"cache_file_name\"])\n def flatten_indices(\n self,\n keep_in_memory: bool = False,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n features: Optional[Features] = None,\n disable_nullable: bool = False,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create and cache a new Dataset by flattening the indices mapping.\n\n Args:\n keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file.\n cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n features (`Optional[datasets.Features]`, default `None`): Use a specific Features to store the cache file\n instead of the automatically generated one.\n disable_nullable (:obj:`bool`, default `False`): Allow null values in the table.\n new_fingerprint (`Optional[str]`, default `None`): The new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n\n return self.map(\n batched=True, # for speed\n keep_in_memory=keep_in_memory,\n cache_file_name=cache_file_name,\n writer_batch_size=writer_batch_size,\n features=features,\n disable_nullable=disable_nullable,\n new_fingerprint=new_fingerprint,\n desc=\"Flattening the indices\",\n )\n\n def _new_dataset_with_indices(\n self,\n indices_cache_file_name: Optional[str] = None,\n indices_buffer: Optional[pa.Buffer] = None,\n fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the\n current Dataset.\n \"\"\"\n\n if indices_cache_file_name is None and indices_buffer is None:\n raise ValueError(\"At least one of indices_cache_file_name or indices_buffer must be provided.\")\n\n if fingerprint is None:\n raise ValueError(\"please specify a fingerprint for the dataset with indices\")\n\n if indices_cache_file_name is not 
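# Usage sketch for `filter`: the predicate is turned into a batched `map` that builds an
# indices mapping (via the mask helper referenced in the `filter` body above). Illustration only.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["keep", "drop", "keep"], "label": [1, 0, 1]})
kept = ds.filter(lambda example: example["label"] == 1)
print(len(kept), kept["text"])   # 2 ['keep', 'keep']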
None:\n indices_table = MemoryMappedTable.from_file(indices_cache_file_name)\n else:\n indices_table = InMemoryTable.from_buffer(indices_buffer)\n\n # Return new Dataset object\n # don't forget to copy the objects\n return Dataset(\n self._data,\n info=self.info.copy(),\n split=self.split,\n indices_table=indices_table,\n fingerprint=fingerprint,\n )\n\n @transmit_format\n @fingerprint_transform(inplace=False, ignore_kwargs=[\"indices_cache_file_name\"])\n def select(\n self,\n indices: Iterable,\n keep_in_memory: bool = False,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create a new dataset with rows selected following the list/array of indices.\n\n Args:\n indices (sequence, iterable, ndarray or Series): List or 1D-array of integer indices for indexing.\n keep_in_memory (:obj:`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.\n indices_cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the\n indices mapping instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n if keep_in_memory and indices_cache_file_name is not None:\n raise ValueError(\"Please use either `keep_in_memory` or `indices_cache_file_name` but not both.\")\n\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.select` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n\n # If the array is empty we do nothing\n if len(self) == 0:\n return self\n\n # Prepare the writer for our indices arrow table\n if keep_in_memory or indices_cache_file_name is None:\n buf_writer = pa.BufferOutputStream()\n tmp_file = None\n writer = ArrowWriter(\n stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit=\"indices\"\n )\n else:\n buf_writer = None\n logger.info(f\"Caching indices mapping at {indices_cache_file_name}\")\n tmp_file = tempfile.NamedTemporaryFile(\"wb\", dir=os.path.dirname(indices_cache_file_name), delete=False)\n writer = ArrowWriter(\n path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit=\"indices\"\n )\n\n indices = list(indices)\n\n size = len(self)\n _check_valid_indices_value(int(max(indices)), size=size)\n _check_valid_indices_value(int(min(indices)), size=size)\n\n indices_array = pa.array(indices, type=pa.uint64())\n # Check if we need to convert indices\n if self._indices is not None:\n indices_array = self._indices.column(0).take(indices_array)\n\n indices_table = pa.Table.from_arrays([indices_array], names=[\"indices\"])\n\n with writer:\n try:\n writer.write_table(indices_table)\n writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file\n except (Exception, KeyboardInterrupt):\n if tmp_file is not None:\n tmp_file.close()\n if os.path.exists(tmp_file.name):\n os.remove(tmp_file.name)\n raise\n\n if tmp_file is not None:\n tmp_file.close()\n shutil.move(tmp_file.name, indices_cache_file_name)\n umask = os.umask(0o666)\n os.umask(umask)\n os.chmod(indices_cache_file_name, 0o666 & ~umask)\n\n # Return new Dataset object\n if buf_writer is None:\n return self._new_dataset_with_indices(\n indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint\n )\n else:\n return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)\n\n @transmit_format\n @fingerprint_transform(inplace=False, ignore_kwargs=[\"load_from_cache_file\", \"indices_cache_file_name\"])\n def sort(\n self,\n column: str,\n reverse: bool = False,\n kind: str = None,\n null_placement: str = \"last\",\n keep_in_memory: bool = False,\n load_from_cache_file: bool = True,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create a new dataset sorted according to a column.\n\n Currently sorting according to a column name uses pandas sorting algorithm under the hood.\n The column should thus be a pandas compatible type (in particular not a nested type).\n This also means that the column used for sorting is fully loaded in memory (which should be fine in most cases).\n\n Args:\n column (:obj:`str`): column name to sort by.\n reverse (:obj:`bool`, default `False`): If True, sort by descending order rather then ascending.\n kind (:obj:`str`, optional): Pandas algorithm for sorting selected in {‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’},\n The default is ‘quicksort’. Note that both ‘stable’ and ‘mergesort’ use timsort under the covers and, in general,\n the actual implementation will vary with data type. The ‘mergesort’ option is retained for backwards compatibility.\n null_placement (:obj:`str`, default `last`):\n Put `None` values at the beginning if ‘first‘; ‘last‘ puts `None` values at the end.\n\n .. 
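# Usage sketch for `select`: it only writes an indices mapping, the underlying Arrow table
# is not copied; `flatten_indices` (defined earlier) materializes the selection again.
# Illustration only.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c", "d"]})
subset = ds.select([3, 0, 1])          # rows are returned in the requested order
print(subset["text"])                  # ['d', 'a', 'b']
contiguous = subset.flatten_indices()  # drop the indices mapping, rewrite the rows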
versionadded:: 1.14.2\n keep_in_memory (:obj:`bool`, default `False`): Keep the sorted indices in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the sorted indices\n can be identified, use it instead of recomputing.\n indices_cache_file_name (`Optional[str]`, default `None`): Provide the name of a path for the cache file. It is used to store the\n sorted indices instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n Higher value gives smaller cache files, lower value consume less temporary memory.\n new_fingerprint (`Optional[str]`, default `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n # If the array is empty we do nothing\n if len(self) == 0:\n return self\n\n # Check the column name\n if not isinstance(column, str) or column not in self._data.column_names:\n raise ValueError(\n f\"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}\"\n )\n\n # Check if we've already cached this computation (indexed by a hash)\n if self.cache_files:\n if indices_cache_file_name is None:\n # we create a unique hash from the function, current dataset file and the mapping args\n indices_cache_file_name = self._get_cache_file_path(new_fingerprint)\n if os.path.exists(indices_cache_file_name) and load_from_cache_file:\n logger.warning(f\"Loading cached sorted indices for dataset at {indices_cache_file_name}\")\n return self._new_dataset_with_indices(\n fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name\n )\n\n column_data = self._getitem(\n column, format_type=\"pandas\", format_columns=None, output_all_columns=False, format_kwargs=None\n )\n\n df_sorted = column_data.to_frame().sort_values(\n column, ascending=not reverse, kind=kind, na_position=null_placement\n )\n indices = df_sorted.index.to_numpy()\n\n return self.select(\n indices=indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=new_fingerprint,\n )\n\n @transmit_format\n @fingerprint_transform(\n inplace=False, randomized_function=True, ignore_kwargs=[\"load_from_cache_file\", \"indices_cache_file_name\"]\n )\n def shuffle(\n self,\n seed: Optional[int] = None,\n generator: Optional[np.random.Generator] = None,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = True,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create a new Dataset where the rows are shuffled.\n\n Currently shuffling uses numpy random generators.\n You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).\n\n Args:\n seed (:obj:`int`, optional): A seed to initialize the default BitGenerator if ``generator=None``.\n If None, then fresh, unpredictable entropy will be pulled from the OS.\n If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive 
the initial BitGenerator state.\n generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows.\n If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).\n keep_in_memory (:obj:`bool`, default `False`): Keep the shuffled indices in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the shuffled indices\n can be identified, use it instead of recomputing.\n indices_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the\n shuffled indices instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n new_fingerprint (:obj:`str`, optional, default `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n # If the array is empty we do nothing\n if len(self) == 0:\n return self\n\n if seed is not None and generator is not None:\n raise ValueError(\"Both `seed` and `generator` were provided. Please specify just one of them.\")\n\n if generator is not None and not isinstance(generator, np.random.Generator):\n raise ValueError(\"The provided generator must be an instance of numpy.random.Generator\")\n\n if generator is None:\n if seed is None:\n _, seed, pos, *_ = np.random.get_state()\n seed = seed[pos] if pos < 624 else seed[0]\n _ = np.random.random() # do 1 step of rng\n generator = np.random.default_rng(seed)\n\n # Check if we've already cached this computation (indexed by a hash)\n if self.cache_files:\n if indices_cache_file_name is None:\n # we create a unique hash from the function, current dataset file and the mapping args\n indices_cache_file_name = self._get_cache_file_path(new_fingerprint)\n if os.path.exists(indices_cache_file_name) and load_from_cache_file:\n logger.warning(f\"Loading cached shuffled indices for dataset at {indices_cache_file_name}\")\n return self._new_dataset_with_indices(\n fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name\n )\n\n permutation = generator.permutation(len(self))\n\n return self.select(\n indices=permutation,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=new_fingerprint,\n )\n\n @transmit_format\n @fingerprint_transform(\n inplace=False,\n randomized_function=True,\n fingerprint_names=[\"train_new_fingerprint\", \"test_new_fingerprint\"],\n ignore_kwargs=[\"load_from_cache_file\", \"train_indices_cache_file_name\", \"test_indices_cache_file_name\"],\n )\n def train_test_split(\n self,\n test_size: Union[float, int, None] = None,\n train_size: Union[float, int, None] = None,\n shuffle: bool = True,\n seed: Optional[int] = None,\n generator: Optional[np.random.Generator] = None,\n keep_in_memory: bool = False,\n 
load_from_cache_file: bool = True,\n train_indices_cache_file_name: Optional[str] = None,\n test_indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n train_new_fingerprint: Optional[str] = None,\n test_new_fingerprint: Optional[str] = None,\n ) -> \"DatasetDict\":\n \"\"\"Return a dictionary (:obj:`datasets.DatsetDict`) with two random train and test subsets (`train` and `test` ``Dataset`` splits).\n Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.\n\n This method is similar to scikit-learn `train_test_split` with the omission of the stratified options.\n\n Args:\n test_size (:obj:`numpy.random.Generator`, optional): Size of the test split\n If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n If int, represents the absolute number of test samples.\n If None, the value is set to the complement of the train size.\n If train_size is also None, it will be set to 0.25.\n train_size (:obj:`numpy.random.Generator`, optional): Size of the train split\n If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split.\n If int, represents the absolute number of train samples.\n If None, the value is automatically set to the complement of the test size.\n shuffle (:obj:`bool`, optional, default `True`): Whether or not to shuffle the data before splitting.\n seed (:obj:`int`, optional): A seed to initialize the default BitGenerator if ``generator=None``.\n If None, then fresh, unpredictable entropy will be pulled from the OS.\n If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.\n generator (:obj:`numpy.random.Generator`, optional): Numpy random Generator to use to compute the permutation of the dataset rows.\n If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).\n keep_in_memory (:obj:`bool`, default `False`): Keep the splits indices in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the splits indices\n can be identified, use it instead of recomputing.\n train_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the\n train split indices instead of the automatically generated cache file name.\n test_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. 
It is used to store the\n test split indices instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n train_new_fingerprint (:obj:`str`, optional, defaults to `None`): the new fingerprint of the train set after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n test_new_fingerprint (:obj:`str`, optional, defaults to `None`): the new fingerprint of the test set after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n from .dataset_dict import DatasetDict # import here because of circular dependency\n\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n # If the array is empty we do nothing\n if len(self) == 0:\n return DatasetDict({\"train\": self, \"test\": self})\n\n if test_size is None and train_size is None:\n test_size = 0.25\n\n # Safety checks similar to scikit-learn's ones.\n # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)\n n_samples = len(self)\n if (\n isinstance(test_size, int)\n and (test_size >= n_samples or test_size <= 0)\n or isinstance(test_size, float)\n and (test_size <= 0 or test_size >= 1)\n ):\n raise ValueError(\n f\"test_size={test_size} should be either positive and smaller \"\n f\"than the number of samples {n_samples} or a float in the (0, 1) range\"\n )\n\n if (\n isinstance(train_size, int)\n and (train_size >= n_samples or train_size <= 0)\n or isinstance(train_size, float)\n and (train_size <= 0 or train_size >= 1)\n ):\n raise ValueError(\n f\"train_size={train_size} should be either positive and smaller \"\n f\"than the number of samples {n_samples} or a float in the (0, 1) range\"\n )\n\n if train_size is not None and not isinstance(train_size, (int, float)):\n raise ValueError(f\"Invalid value for train_size: {train_size} of type {type(train_size)}\")\n if test_size is not None and not isinstance(test_size, (int, float)):\n raise ValueError(f\"Invalid value for test_size: {test_size} of type {type(test_size)}\")\n\n if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:\n raise ValueError(\n f\"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)\"\n \" range. Reduce test_size and/or train_size.\"\n )\n\n if isinstance(test_size, float):\n n_test = ceil(test_size * n_samples)\n elif isinstance(test_size, int):\n n_test = float(test_size)\n\n if isinstance(train_size, float):\n n_train = floor(train_size * n_samples)\n elif isinstance(train_size, int):\n n_train = float(train_size)\n\n if train_size is None:\n n_train = n_samples - n_test\n elif test_size is None:\n n_test = n_samples - n_train\n\n if n_train + n_test > n_samples:\n raise ValueError(\n f\"The sum of train_size and test_size = {n_train + n_test}, \"\n \"should be smaller than the number of \"\n f\"samples {n_samples}. 
Reduce test_size and/or \"\n \"train_size.\"\n )\n\n n_train, n_test = int(n_train), int(n_test)\n\n if n_train == 0:\n raise ValueError(\n f\"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the \"\n \"resulting train set will be empty. Adjust any of the \"\n \"aforementioned parameters.\"\n )\n\n if generator is None and shuffle is True:\n if seed is None:\n _, seed, pos, *_ = np.random.get_state()\n seed = seed[pos] if pos < 624 else seed[0]\n _ = np.random.random() # do 1 step of rng\n generator = np.random.default_rng(seed)\n\n # Check if we've already cached this computation (indexed by a hash)\n if self.cache_files:\n if train_indices_cache_file_name is None or test_indices_cache_file_name is None:\n # we create a unique hash from the function, current dataset file and the mapping args\n\n if train_indices_cache_file_name is None:\n train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)\n if test_indices_cache_file_name is None:\n test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)\n if (\n os.path.exists(train_indices_cache_file_name)\n and os.path.exists(test_indices_cache_file_name)\n and load_from_cache_file\n ):\n logger.warning(\n f\"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}\"\n )\n return DatasetDict(\n {\n \"train\": self._new_dataset_with_indices(\n fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name\n ),\n \"test\": self._new_dataset_with_indices(\n fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name\n ),\n }\n )\n\n if not shuffle:\n train_indices = np.arange(n_train)\n test_indices = np.arange(n_train, n_train + n_test)\n else:\n # random partition\n permutation = generator.permutation(len(self))\n test_indices = permutation[:n_test]\n train_indices = permutation[n_test : (n_test + n_train)]\n\n train_split = self.select(\n indices=train_indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=train_indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=train_new_fingerprint,\n )\n test_split = self.select(\n indices=test_indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=test_indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=test_new_fingerprint,\n )\n\n return DatasetDict({\"train\": train_split, \"test\": test_split})\n\n def shard(\n self,\n num_shards: int,\n index: int,\n contiguous: bool = False,\n keep_in_memory: bool = False,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n ) -> \"Dataset\":\n \"\"\"Return the `index`-nth shard from dataset split into `num_shards` pieces.\n\n This shards deterministically. dset.shard(n, i) will contain all elements of dset whose\n index mod n = i.\n\n dset.shard(n, i, contiguous=True) will instead split dset into contiguous chunks,\n so it can be easily concatenated back together after processing. 
If n % i == l, then the\n first l shards will have length (n // i) + 1, and the remaining shards will have length (n // i).\n `datasets.concatenate([dset.shard(n, i, contiguous=True) for i in range(n)])` will return\n a dataset with the same order as the original.\n\n Be sure to shard before using any randomizing operator (such as shuffle).\n It is best if the shard operator is used early in the dataset pipeline.\n\n\n Args:\n num_shards (:obj:`int`): How many shards to split the dataset into.\n index (:obj:`int`): Which shard to select and return.\n contiguous: (:obj:`bool`, default `False`): Whether to select contiguous blocks of indices for shards.\n keep_in_memory (:obj:`bool`, default `False`): Keep the dataset in memory instead of writing it to a cache file.\n load_from_cache_file (:obj:`bool`, default `True`): If a cache file storing the current computation from `function`\n can be identified, use it instead of recomputing.\n indices_cache_file_name (:obj:`str`, optional): Provide the name of a path for the cache file. It is used to store the\n indices of each shard instead of the automatically generated cache file name.\n writer_batch_size (:obj:`int`, default `1000`): Number of rows per write operation for the cache file writer.\n This value is a good trade-off between memory usage during the processing, and processing speed.\n Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `.map()`.\n \"\"\"\n if not 0 <= index < num_shards:\n raise ValueError(\"index should be in [0, num_shards-1]\")\n if contiguous:\n div = len(self) // num_shards\n mod = len(self) % num_shards\n start = div * index + min(index, mod)\n end = start + div + (1 if index < mod else 0)\n indices = np.arange(start, end)\n else:\n indices = np.arange(index, len(self), num_shards)\n\n return self.select(\n indices=indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n )\n\n def export(\n self,\n filename: str,\n format: str = \"tfrecord\",\n ):\n \"\"\"Writes the Arrow dataset to a TFRecord file.\n\n The dataset must already be in tensorflow format. The records will be written with\n keys from `dataset._format_columns`.\n\n Args:\n filename (:obj:`str`): The filename, including the `.tfrecord` extension, to write to.\n format (`str`, optional, default `\"tfrecord\"`): The type of output file. Currently this is a no-op, as\n TFRecords are the only option. 
This enables a more flexible function signature later.\n \"\"\"\n try:\n import tensorflow as tf # noqa: F401\n except ImportError:\n logger.error(\"Tensorflow needs to be installed to be able to return Tensorflow tensors.\")\n\n # From https://www.tensorflow.org/tutorials/load_data/tfrecord\n def _bytes_feature(values):\n \"\"\"Returns a bytes_list from a list of string / byte.\"\"\"\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))\n\n def _float_feature(values):\n \"\"\"Returns a float_list from a list of float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=values))\n\n def _int64_feature(values):\n \"\"\"Returns an int64_list from a list of bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n def _feature(values: Union[float, int, str, np.ndarray]) -> \"tf.train.Feature\":\n \"\"\"Typechecks `values` and returns the corresponding tf.train.Feature.\"\"\"\n if isinstance(values, np.ndarray):\n if values.dtype == np.dtype(float):\n return _float_feature(values)\n elif values.dtype == np.int64:\n return _int64_feature(values)\n elif values.dtype == np.dtype(str) or (\n values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)\n ):\n return _bytes_feature([v.encode() for v in values])\n else:\n raise ValueError(\n f\"values={values} is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized\"\n )\n if hasattr(values, \"dtype\"):\n if np.issubdtype(values.dtype, np.floating):\n return _float_feature([values.item()])\n elif np.issubdtype(values.dtype, np.integer):\n return _int64_feature([values.item()])\n elif np.issubdtype(values.dtype, np.str):\n return _bytes_feature([values.item().encode()])\n else:\n raise ValueError(f\"values={values} has dtype {values.dtype}, which cannot be serialized\")\n else:\n raise ValueError(f\"values={values} are not numpy objects, and so cannot be serialized\")\n\n def serialize_example(ex):\n feature = {key: _feature(value) for key, value in ex.items()}\n example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n return example_proto.SerializeToString()\n\n def tf_serialize_example(ex):\n tf_string = tf.py_function(serialize_example, (ex,), tf.string)\n return tf.reshape(tf_string, ())\n\n def generator():\n for ex in self:\n yield serialize_example(ex)\n\n if self._format_type != \"numpy\":\n raise ValueError(\"Dataset format must be numpy before exporting\")\n if not filename.endswith(\".tfrecord\"):\n raise ValueError(\"filename {filename} must end with .tfrecord\")\n tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())\n writer = tf.data.experimental.TFRecordWriter(filename)\n logger.info(f\"Writing TFRecord to {filename}\")\n writer.write(tf_dataset)\n logger.info(f\"Finished writing TFRecord to {filename}\")\n self = None # delete the dataset reference used by tf_dataset\n\n def to_csv(\n self,\n path_or_buf: Union[PathLike, BinaryIO],\n batch_size: Optional[int] = None,\n num_proc: Optional[int] = None,\n **to_csv_kwargs,\n ) -> int:\n \"\"\"Exports the dataset to csv\n\n Args:\n path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO.\n batch_size (Optional ``int``): Size of the batch to load in memory and write at once.\n Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`.\n num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't\n use multiprocessing. 
``batch_size`` in this case defaults to\n :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default\n value if you have sufficient compute power.\n to_csv_kwargs: Parameters to pass to pandas's :func:`pandas.DataFrame.to_csv`\n\n Returns:\n int: The number of characters or bytes written\n \"\"\"\n # Dynamic import to avoid circular dependency\n from .io.csv import CsvDatasetWriter\n\n return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write()\n\n def to_dict(self, batch_size: Optional[int] = None, batched: bool = False) -> Union[dict, Iterator[dict]]:\n \"\"\"Returns the dataset as a Python dict. Can also return a generator for large datasets.\n\n Args:\n batched (``bool``): Set to :obj:`True` to return a generator that yields the dataset as batches\n of ``batch_size`` rows. Defaults to :obj:`False` (returns the whole datasetas once)\n batch_size (Optional ``int``): The size (number of rows) of the batches if ``batched`` is `True`.\n Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`.\n\n Returns:\n `dict` or `Iterator[dict]`\n \"\"\"\n if not batched:\n return query_table(\n table=self._data,\n key=slice(0, len(self)),\n indices=self._indices if self._indices is not None else None,\n ).to_pydict()\n else:\n batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE\n return (\n query_table(\n table=self._data,\n key=slice(offset, offset + batch_size),\n indices=self._indices if self._indices is not None else None,\n ).to_pydict()\n for offset in range(0, len(self), batch_size)\n )\n\n def to_json(\n self,\n path_or_buf: Union[PathLike, BinaryIO],\n batch_size: Optional[int] = None,\n num_proc: Optional[int] = None,\n **to_json_kwargs,\n ) -> int:\n \"\"\"Export the dataset to JSON Lines or JSON.\n\n Args:\n path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO.\n batch_size (:obj:`int`, optional): Size of the batch to load in memory and write at once.\n Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`.\n num_proc (:obj:`int`, optional): Number of processes for multiprocessing. By default it doesn't\n use multiprocessing. ``batch_size`` in this case defaults to\n :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default\n value if you have sufficient compute power.\n lines (:obj:`bool`, default ``True``): Whether output JSON lines format.\n Only possible if ``orient=\"records\"`. 
It will throw ValueError with ``orient`` different from\n ``\"records\"``, since the others are not list-like.\n orient (:obj:`str`, default ``\"records\"``): Format of the JSON:\n\n - ``\"records\"``: list like ``[{column -> value}, … , {column -> value}]``\n - ``\"split\"``: dict like ``{\"index\" -> [index], \"columns\" -> [columns], \"data\" -> [values]}``\n - ``\"index\"``: dict like ``{index -> {column -> value}}``\n - ``\"columns\"``: dict like ``{column -> {index -> value}}``\n - ``\"values\"``: just the values array\n - ``\"table\"``: dict like ``{\"schema\": {schema}, \"data\": {data}}``\n **to_json_kwargs: Parameters to pass to pandas's `pandas.DataFrame.to_json\n <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html>`_.\n\n Returns:\n int: The number of characters or bytes written.\n \"\"\"\n # Dynamic import to avoid circular dependency\n from .io.json import JsonDatasetWriter\n\n return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write()\n\n def to_pandas(\n self, batch_size: Optional[int] = None, batched: bool = False\n ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:\n \"\"\"Returns the dataset as a :class:`pandas.DataFrame`. Can also return a generator for large datasets.\n\n Args:\n batched (``bool``): Set to :obj:`True` to return a generator that yields the dataset as batches\n of ``batch_size`` rows. Defaults to :obj:`False` (returns the whole datasetas once)\n batch_size (Optional ``int``): The size (number of rows) of the batches if ``batched`` is `True`.\n Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`.\n\n Returns:\n `pandas.DataFrame` or `Iterator[pandas.DataFrame]`\n \"\"\"\n if not batched:\n return query_table(\n table=self._data,\n key=slice(0, len(self)),\n indices=self._indices if self._indices is not None else None,\n ).to_pandas(types_mapper=pandas_types_mapper)\n else:\n batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE\n return (\n query_table(\n table=self._data,\n key=slice(offset, offset + batch_size),\n indices=self._indices if self._indices is not None else None,\n ).to_pandas(types_mapper=pandas_types_mapper)\n for offset in range(0, len(self), batch_size)\n )\n\n def to_parquet(\n self,\n path_or_buf: Union[PathLike, BinaryIO],\n batch_size: Optional[int] = None,\n **parquet_writer_kwargs,\n ) -> int:\n \"\"\"Exports the dataset to parquet\n\n Args:\n path_or_buf (``PathLike`` or ``FileOrBuffer``): Either a path to a file or a BinaryIO.\n batch_size (Optional ``int``): Size of the batch to load in memory and write at once.\n Defaults to :obj:`datasets.config.DEFAULT_MAX_BATCH_SIZE`.\n parquet_writer_kwargs: Parameters to pass to PyArrow's :class:`pyarrow.parquet.ParquetWriter`\n\n Returns:\n int: The number of characters or bytes written\n \"\"\"\n # Dynamic import to avoid circular dependency\n from .io.parquet import ParquetDatasetWriter\n\n return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write()\n\n def _push_parquet_shards_to_hub(\n self,\n repo_id: str,\n split: Optional[str] = None,\n private: Optional[bool] = False,\n token: Optional[str] = None,\n branch: Optional[str] = None,\n shard_size: Optional[int] = 500 << 20,\n embed_external_files: bool = True,\n ) -> Tuple[str, str, int, int]:\n \"\"\"Pushes the dataset to the hub.\n The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed.\n\n Args:\n repo_id (:obj:`str`):\n The ID of the repository 
to push to in the following format: `<user>/<dataset_name>` or\n `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace\n of the logged-in user.\n split (Optional, :obj:`str`):\n The name of the split that will be given to that dataset. Defaults to `self.split`.\n private (Optional :obj:`bool`, defaults to :obj:`False`):\n Whether the dataset repository should be set to private or not. Only affects repository creation:\n a repository that already exists will not be affected by that parameter.\n token (Optional :obj:`str`):\n An optional authentication token for the Hugging Face Hub. If no token is passed, will default\n to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error\n if no token is passed and the user is not logged-in.\n branch (Optional :obj:`str`):\n The git branch on which to push the dataset. This defaults to the default branch as specified\n in your repository, which defaults to `\"main\"`.\n shard_size (Optional :obj:`int`):\n The size of the dataset shards to be uploaded to the hub. The dataset will be pushed in files\n of the size specified here, in bytes. Defaults to a shard size of 500MB.\n embed_external_files (:obj:`bool`, default ``True``):\n Whether to embed file bytes in the shards.\n In particular, this will do the following before the push for the fields of type:\n\n - :class:`Audio` and class:`Image`: remove local path information and embed file content in the Parquet files.\n\n Returns:\n repo_id (:obj:`str`): ID of the repository in <user>/<dataset_name>` or `<org>/<dataset_name>` format\n split (:obj:`str`): name of the uploaded split\n uploaded_size (:obj:`int`): number of uploaded bytes\n dataset_nbytes (:obj:`int`): approximate size in bytes of the uploaded dataset afer uncompression\n\n Example:\n .. code-block:: python\n\n >>> dataset.push_to_hub(\"<organization>/<dataset_id>\", split=\"evaluation\")\n \"\"\"\n api = HfApi(endpoint=config.HF_ENDPOINT)\n token = token if token is not None else HfFolder.get_token()\n\n if token is None:\n raise OSError(\n \"You need to provide a `token` or be logged in to Hugging Face with \" \"`huggingface-cli login`.\"\n )\n\n if split is None:\n split = str(self.split) if self.split is not None else \"train\"\n\n identifier = repo_id.split(\"/\")\n\n if len(identifier) > 2:\n raise ValueError(\n f\"The identifier should be in the format <repo_id> or <namespace>/<repo_id>. 
It is {identifier}, \"\n \"which doesn't conform to either format.\"\n )\n if len(identifier) == 2:\n organization, dataset_name = identifier\n else:\n dataset_name = identifier[0]\n organization = api.whoami(token)[\"name\"]\n repo_id = f\"{organization}/{dataset_name}\"\n\n try:\n api.create_repo(\n dataset_name,\n token,\n repo_type=\"dataset\",\n organization=organization,\n private=private,\n )\n except HTTPError as err:\n if err.response.status_code == 409:\n if private is not None:\n logger.warning(\"The repository already exists: the `private` keyword argument will be ignored.\")\n else:\n raise\n\n # Find decodable columns, because if there are any, we need to:\n # (1) adjust the dataset size computation (needed for sharding) to account for possible external files\n # (2) embed the bytes from the files in the shards\n decodable_columns = (\n [k for k, v in self.features.items() if require_decoding(v, ignore_decode_attribute=True)]\n if embed_external_files\n else []\n )\n\n dataset_nbytes = self.data.nbytes\n\n if decodable_columns:\n # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples\n extra_nbytes = 0\n\n def extra_nbytes_visitor(array, feature):\n nonlocal extra_nbytes\n if isinstance(feature, (Audio, Image)):\n for x in array.to_pylist():\n if x[\"bytes\"] is None and x[\"path\"] is not None:\n size = xgetsize(x[\"path\"])\n extra_nbytes += size\n extra_nbytes -= array.field(\"path\").nbytes\n\n table = self.with_format(\"arrow\")[:1000]\n table_visitor(table, extra_nbytes_visitor)\n\n extra_nbytes = extra_nbytes * len(self.data) / len(table)\n dataset_nbytes = dataset_nbytes + extra_nbytes\n\n if self._indices is not None:\n dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)\n\n num_shards = int(dataset_nbytes / shard_size) + 1\n num_shards = max(num_shards, 1)\n shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))\n\n if decodable_columns:\n\n def shards_with_embedded_external_files(shards):\n # Temporarily assign the modified version of `cast_storage` before the cast to the decodable\n # feature types to delete path information and embed file content in the arrow file.\n with contextlib.ExitStack() as stack:\n for decodable_feature_type in [Audio, Image]:\n stack.enter_context(\n temporary_assignment(\n decodable_feature_type, \"cast_storage\", decodable_feature_type.embed_storage\n )\n )\n for shard in shards:\n format = shard.format\n shard = shard.with_format(\"arrow\")\n shard = shard.map(\n partial(cast_table_to_features, features=shard.features),\n batched=True,\n batch_size=1000,\n keep_in_memory=True,\n )\n shard = shard.with_format(**format)\n yield shard\n\n shards = shards_with_embedded_external_files(shards)\n\n files = api.list_repo_files(repo_id, repo_type=\"dataset\", revision=branch, token=token)\n files = [file for file in files if file.startswith(\"data/\")]\n\n def path_in_repo(_index):\n return f\"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet\"\n\n # Only delete file shards that don't currently exist. 
Others will be overwritten if the content is different\n # or will be left intact is the content is identical.\n def should_delete_file(file_name):\n file_to_overwrite = file_name in [path_in_repo(i) for i in range(num_shards)]\n file_from_same_split = file_name.startswith(f\"data/{split}-\")\n\n return file_from_same_split and not file_to_overwrite\n\n file_shards_to_delete = [file for file in files if should_delete_file(file)]\n\n def delete_file(file):\n api.delete_file(file, repo_id=repo_id, token=token, repo_type=\"dataset\", revision=branch)\n\n if len(file_shards_to_delete):\n for file in utils.tqdm(\n file_shards_to_delete,\n desc=\"Deleting unused files from dataset repository\",\n total=len(file_shards_to_delete),\n disable=not utils.is_progress_bar_enabled(),\n ):\n delete_file(file)\n\n uploaded_size = 0\n for index, shard in utils.tqdm(\n enumerate(shards),\n desc=\"Pushing dataset shards to the dataset hub\",\n total=num_shards,\n disable=not utils.is_progress_bar_enabled(),\n ):\n buffer = BytesIO()\n shard.to_parquet(buffer)\n uploaded_size += buffer.tell()\n api.upload_file(\n path_or_fileobj=buffer.getvalue(),\n path_in_repo=path_in_repo(index),\n repo_id=repo_id,\n token=token,\n repo_type=\"dataset\",\n revision=branch,\n identical_ok=True,\n )\n return repo_id, split, uploaded_size, dataset_nbytes\n\n def push_to_hub(\n self,\n repo_id: str,\n split: Optional[str] = None,\n private: Optional[bool] = False,\n token: Optional[str] = None,\n branch: Optional[str] = None,\n shard_size: Optional[int] = 500 << 20,\n embed_external_files: bool = True,\n ):\n \"\"\"Pushes the dataset to the hub.\n The dataset is pushed using HTTP requests and does not need to have neither git or git-lfs installed.\n\n Args:\n repo_id (:obj:`str`):\n The ID of the repository to push to in the following format: `<user>/<dataset_name>` or\n `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace\n of the logged-in user.\n split (Optional, :obj:`str`):\n The name of the split that will be given to that dataset. Defaults to `self.split`.\n private (Optional :obj:`bool`, defaults to :obj:`False`):\n Whether the dataset repository should be set to private or not. Only affects repository creation:\n a repository that already exists will not be affected by that parameter.\n token (Optional :obj:`str`):\n An optional authentication token for the Hugging Face Hub. If no token is passed, will default\n to the token saved locally when logging in with ``huggingface-cli login``. Will raise an error\n if no token is passed and the user is not logged-in.\n branch (Optional :obj:`str`):\n The git branch on which to push the dataset. This defaults to the default branch as specified\n in your repository, which defaults to `\"main\"`.\n shard_size (Optional :obj:`int`):\n The size of the dataset shards to be uploaded to the hub. The dataset will be pushed in files\n of the size specified here, in bytes. Defaults to a shard size of 500MB.\n embed_external_files (:obj:`bool`, default ``True``):\n Whether to embed file bytes in the shards.\n In particular, this will do the following before the push for the fields of type:\n\n - :class:`Audio` and class:`Image`: remove local path information and embed file content in the Parquet files.\n\n Example:\n .. 
code-block:: python\n\n >>> dataset.push_to_hub(\"<organization>/<dataset_id>\", split=\"evaluation\")\n \"\"\"\n repo_id, split, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(\n repo_id=repo_id,\n split=split,\n private=private,\n token=token,\n branch=branch,\n shard_size=shard_size,\n embed_external_files=embed_external_files,\n )\n organization, dataset_name = repo_id.split(\"/\")\n info_to_dump = self.info.copy()\n info_to_dump.download_checksums = None\n info_to_dump.download_size = uploaded_size\n info_to_dump.dataset_size = dataset_nbytes\n info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes\n info_to_dump.splits = {\n split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)\n }\n buffer = BytesIO()\n buffer.write(f'{{\"{organization}--{dataset_name}\": '.encode())\n info_to_dump._dump_info(buffer)\n buffer.write(b\"}\")\n HfApi(endpoint=config.HF_ENDPOINT).upload_file(\n path_or_fileobj=buffer.getvalue(),\n path_in_repo=config.DATASETDICT_INFOS_FILENAME,\n repo_id=repo_id,\n token=token,\n repo_type=\"dataset\",\n revision=branch,\n identical_ok=True,\n )\n\n @transmit_format\n @fingerprint_transform(inplace=False)\n def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str):\n \"\"\"Add column to Dataset.\n\n .. versionadded:: 1.7\n\n Args:\n name (str): Column name.\n column (list or np.array): Column data to be added.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n column_table = InMemoryTable.from_pydict({name: column})\n _check_column_names(self._data.column_names + column_table.column_names)\n dataset = self.flatten_indices() if self._indices is not None else self\n # Concatenate tables horizontally\n table = concat_tables([dataset._data, column_table], axis=1)\n # Update features\n info = dataset.info.copy()\n info.features.update(Features.from_arrow_schema(column_table.schema))\n table = update_metadata_with_features(table, info.features)\n return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)\n\n def add_faiss_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None, # noqa: F821\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n dtype=np.float32,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n By default the index is done over the vectors of the specified column.\n You can specify :obj:`device` if you want to run it on GPU (:obj:`device` must be the GPU index).\n You can find more information about Faiss here:\n\n - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__\n\n Args:\n column (:obj:`str`):\n The column of the vectors to add to the index.\n index_name (Optional :obj:`str`):\n The index_name/identifier of the index.\n This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`.\n By default it corresponds to `column`.\n device (Optional :obj:`int`):\n If not None, this is the index of the GPU to use.\n By default it uses the CPU.\n string_factory (Optional :obj:`str`):\n This is passed to the index factory of Faiss to create the index.\n Default index class is ``IndexFlat``.\n metric_type (Optional :obj:`int`):\n Type of metric. 
Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`):\n Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`):\n If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False):\n Enable the verbosity of the Faiss index.\n dtype (data-type): The dtype of the numpy arrays that are indexed.\n Default is ``np.float32``.\n\n Example:\n .. code-block:: python\n\n ds = datasets.load_dataset('crime_and_punish', split='train')\n ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line']}))\n ds_with_embeddings.add_faiss_index(column='embeddings')\n # query\n scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)\n # save index\n ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')\n\n ds = datasets.load_dataset('crime_and_punish', split='train')\n # load index\n ds.load_faiss_index('embeddings', 'my_index.faiss')\n # query\n scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)\n \"\"\"\n with self.formatted_as(type=\"numpy\", columns=[column], dtype=dtype):\n super().add_faiss_index(\n column=column,\n index_name=index_name,\n device=device,\n string_factory=string_factory,\n metric_type=metric_type,\n custom_index=custom_index,\n train_size=train_size,\n faiss_verbose=faiss_verbose,\n )\n return self\n\n def add_faiss_index_from_external_arrays(\n self,\n external_arrays: np.array,\n index_name: str,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None, # noqa: F821\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n dtype=np.float32,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n The index is created using the vectors of `external_arrays`.\n You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n\n - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__\n\n Args:\n external_arrays (:obj:`np.array`):\n If you want to use arrays from outside the lib for the index, you can set :obj:`external_arrays`.\n It will use :obj:`external_arrays` to create the Faiss index instead of the arrays in the given :obj:`column`.\n index_name (:obj:`str`):\n The index_name/identifier of the index.\n This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`.\n device (Optional :obj:`int`):\n If not None, this is the index of the GPU to use.\n By default it uses the CPU.\n string_factory (Optional :obj:`str`):\n This is passed to the index factory of Faiss to create the index.\n Default index class is ``IndexFlat``.\n metric_type (Optional :obj:`int`):\n Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`):\n Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`):\n If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False):\n Enable the verbosity of the Faiss index.\n dtype (:obj:`numpy.dtype`): The dtype of the numpy arrays that are indexed. 
Default is np.float32.\n \"\"\"\n super().add_faiss_index_from_external_arrays(\n external_arrays=external_arrays.astype(dtype),\n index_name=index_name,\n device=device,\n string_factory=string_factory,\n metric_type=metric_type,\n custom_index=custom_index,\n train_size=train_size,\n faiss_verbose=faiss_verbose,\n )\n\n def add_elasticsearch_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"elasticsearch.Elasticsearch\"] = None, # noqa: F821\n es_index_name: Optional[str] = None,\n es_index_config: Optional[dict] = None,\n ):\n \"\"\"Add a text index using ElasticSearch for fast retrieval. This is done in-place.\n\n Args:\n column (:obj:`str`):\n The column of the documents to add to the index.\n index_name (Optional :obj:`str`):\n The index_name/identifier of the index.\n This is the index name that is used to call :meth:`Dataset.get_nearest_examples` or :meth:`Dataset.search`.\n By default it corresponds to :obj:`column`.\n host (Optional :obj:`str`, defaults to localhost):\n host of where ElasticSearch is running\n port (Optional :obj:`str`, defaults to 9200):\n port of where ElasticSearch is running\n es_client (Optional :obj:`elasticsearch.Elasticsearch`):\n The elasticsearch client used to create the index if host and port are None.\n es_index_name (Optional :obj:`str`):\n The elasticsearch index name used to create the index.\n es_index_config (Optional :obj:`dict`):\n The configuration of the elasticsearch index.\n Default config is::\n\n {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \" stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"analyzer\": \"standard\",\n \"similarity\": \"BM25\"\n },\n }\n },\n }\n\n Example:\n .. code-block:: python\n\n es_client = elasticsearch.Elasticsearch()\n ds = datasets.load_dataset('crime_and_punish', split='train')\n ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name=\"my_es_index\")\n scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)\n\n \"\"\"\n with self.formatted_as(type=None, columns=[column]):\n super().add_elasticsearch_index(\n column=column,\n index_name=index_name,\n host=host,\n port=port,\n es_client=es_client,\n es_index_name=es_index_name,\n es_index_config=es_index_config,\n )\n return self\n\n @transmit_format\n @fingerprint_transform(inplace=False)\n def add_item(self, item: dict, new_fingerprint: str):\n \"\"\"Add item to Dataset.\n\n .. 
versionadded:: 1.7\n\n Args:\n item (dict): Item data to be added.\n\n Returns:\n :class:`Dataset`\n \"\"\"\n item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})\n # We don't call _check_if_features_can_be_aligned here so this cast is \"unsafe\"\n dset_features, item_features = _align_features([self.features, Features.from_arrow_schema(item_table.schema)])\n # Cast to align the schemas of the tables and concatenate the tables\n table = concat_tables(\n [\n self._data.cast(dset_features.arrow_schema) if self.features != dset_features else self._data,\n item_table.cast(item_features.arrow_schema),\n ]\n )\n if self._indices is None:\n indices_table = None\n else:\n item_indices_array = pa.array([len(self._data)], type=pa.uint64())\n item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=[\"indices\"])\n indices_table = concat_tables([self._indices, item_indices_table])\n info = self.info.copy()\n info.features.update(item_features)\n table = update_metadata_with_features(table, info.features)\n return Dataset(\n table,\n info=info,\n split=self.split,\n indices_table=indices_table,\n fingerprint=new_fingerprint,\n )\n\n def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> \"Dataset\":\n \"\"\"Align the dataset's label ID and label name mapping to match an input :obj:`label2id` mapping.\n This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.\n The alignment in done using the lowercase label names.\n\n Args:\n label2id (:obj:`dict`):\n The label name to ID mapping to align the dataset with.\n label_column (:obj:`str`):\n The column name of labels to align on.\n\n Example:\n .. code-block:: python\n\n # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}\n ds = load_dataset(\"glue\", \"mnli\", split=\"train\")\n # mapping to align with\n label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}\n ds_aligned = ds.align_labels_with_mapping(label2id, \"label\")\n \"\"\"\n # Sanity checks\n if label_column not in self._data.column_names:\n raise ValueError(f\"Column ({label_column}) not in table columns ({self._data.column_names}).\")\n\n label_feature = self.features[label_column]\n if not isinstance(label_feature, ClassLabel):\n raise ValueError(\n f\"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column, and column {label_feature} is {type(label_feature).__name__}.\"\n )\n\n # Sort input mapping by ID value to ensure the label names are aligned\n label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))\n label_names = list(label2id.keys())\n # Some label mappings use uppercase label names so we lowercase them during alignment\n label2id = {k.lower(): v for k, v in label2id.items()}\n int2str_function = label_feature.int2str\n\n def process_label_ids(batch):\n dset_label_names = [\n int2str_function(label_id).lower() if label_id is not None else None\n for label_id in batch[label_column]\n ]\n batch[label_column] = [\n label2id[label_name] if label_name is not None else None for label_name in dset_label_names\n ]\n return batch\n\n features = self.features.copy()\n features[label_column] = ClassLabel(num_classes=len(label_names), names=label_names)\n return self.map(process_label_ids, features=features, batched=True, desc=\"Aligning the labels\")\n\n\ndef concatenate_datasets(\n dsets: List[Dataset],\n info: Optional[Any] = None,\n split: Optional[Any] = None,\n axis: int = 0,\n):\n \"\"\"\n Converts a list of 
:class:`Dataset` with the same schema into a single :class:`Dataset`.\n\n Args:\n dsets (:obj:`List[datasets.Dataset]`): List of Datasets to concatenate.\n info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.\n split (:class:`NamedSplit`, optional): Name of the dataset split.\n axis (``{0, 1}``, default ``0``, meaning over rows):\n Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns\n (horizontally).\n\n .. versionadded:: 1.6.0\n \"\"\"\n # Ignore datasets with no rows\n if any(dset.num_rows > 0 for dset in dsets):\n dsets = [dset for dset in dsets if dset.num_rows > 0]\n else:\n # Return first dataset if all datasets are empty\n return dsets[0]\n\n # Perform checks (and a potentional cast if axis=0)\n if axis == 0:\n _check_if_features_can_be_aligned([dset.features for dset in dsets])\n else:\n if not all([dset.num_rows == dsets[0].num_rows for dset in dsets]):\n raise ValueError(\"Number of rows must match for all datasets\")\n _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names])\n\n # Find common format or reset format\n format = dsets[0].format\n if any(dset.format != format for dset in dsets):\n format = {}\n logger.info(\"Some of the datasets have disparate format. Resetting the format of the concatenated dataset.\")\n\n def apply_offset_to_indices_table(table, offset):\n if offset == 0:\n return table\n else:\n array = table[\"indices\"]\n new_array = pc.add(array, pa.scalar(offset, type=pa.uint64()))\n return InMemoryTable.from_arrays([new_array], names=[\"indices\"])\n\n # Concatenate indices if they exist\n if any(dset._indices is not None for dset in dsets):\n if axis == 0:\n # Datasets with no indices tables are replaced with a dataset with an indices table in memory.\n # Applying an offset to an indices table also brings the table in memory.\n indices_tables = []\n for i in range(len(dsets)):\n if dsets[i]._indices is None:\n dsets[i] = dsets[i].select(range(len(dsets[i])))\n indices_tables.append(dsets[i]._indices)\n\n # An offset needs to be applied to the indices before concatenating\n offset = 0\n for i in range(len(dsets)):\n indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset)\n offset += len(dsets[i]._data)\n\n # Concatenate indices\n indices_tables = [t for t in indices_tables if len(t) > 0]\n if indices_tables:\n indices_table = concat_tables(indices_tables)\n else:\n indices_table = InMemoryTable.from_batches([], schema=pa.schema({\"indices\": pa.int64()}))\n else:\n if len(dsets) == 1:\n indices_table = dsets[0]._indices\n else:\n for i in range(len(dsets)):\n dsets[i] = dsets[i].flatten_indices()\n indices_table = None\n else:\n indices_table = None\n\n table = concat_tables([dset._data for dset in dsets], axis=axis)\n if axis == 0:\n features_list = _align_features([dset.features for dset in dsets])\n else:\n features_list = [dset.features for dset in dsets]\n table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()})\n\n # Concatenate infos\n if info is None:\n info = DatasetInfo.from_merge([dset.info for dset in dsets])\n fingerprint = update_fingerprint(\n \"\".join(dset._fingerprint for dset in dsets), concatenate_datasets, {\"info\": info, \"split\": split}\n )\n\n # Make final concatenated dataset\n concatenated_dataset = Dataset(\n table,\n info=info,\n split=split,\n indices_table=indices_table,\n fingerprint=fingerprint,\n )\n 
concatenated_dataset.set_format(**format)\n return concatenated_dataset\n\n\n# This is outside Dataset.filter as it needs to be picklable for multiprocessing\n\n\ndef get_indices_from_mask_function(\n function: Callable,\n batched: bool,\n with_indices: bool,\n input_columns: Optional[Union[str, List[str]]],\n indices_mapping: Optional[Table] = None,\n *args,\n **fn_kwargs,\n):\n if batched:\n # we extract indices from args\n *inputs, indices = args\n if with_indices:\n mask = function(*inputs, indices, **fn_kwargs)\n else:\n mask = function(*inputs, **fn_kwargs)\n else:\n # we get batched data (to do less look-ups) but `function` only accepts one example\n # therefore we need to call `function` on each example of the batch to get the mask\n *inputs, indices = args\n mask = []\n if input_columns is None:\n # inputs only contains a batch of examples\n batch: dict = inputs[0]\n num_examples = len(batch[next(iter(batch.keys()))])\n for i in range(num_examples):\n example = {key: batch[key][i] for key in batch}\n mask.append(\n function(example, indices[i], **fn_kwargs) if with_indices else function(example, **fn_kwargs)\n )\n else:\n # inputs is a list of columns\n columns: List[List[Any]] = inputs\n num_examples = len(columns[0])\n for i in range(num_examples):\n input = [column[i] for column in columns]\n mask.append(\n function(*input, indices[i], **fn_kwargs) if with_indices else function(*input, **fn_kwargs)\n )\n indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]\n if indices_mapping is not None:\n indices_array = pa.array(indices_array, type=pa.uint64())\n indices_array = indices_mapping.column(0).take(indices_array)\n indices_array = indices_array.to_pylist()\n return {\"indices\": indices_array}\n"
] |
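For reference, a minimal usage sketch of the Dataset methods documented in the source above (select, sort, shuffle, train_test_split, shard, to_csv). It assumes the `datasets` package is installed; the toy data and output file name are placeholders and are not part of the dataset record.

from datasets import Dataset

# Toy in-memory dataset; real usage would typically start from load_dataset(...).
ds = Dataset.from_dict({"text": ["a", "b", "c", "d"], "label": [3, 1, 2, 0]})

ds_sorted = ds.sort("label", reverse=True)              # pandas-backed sort on a flat column
ds_shuffled = ds.shuffle(seed=42)                       # row permutation via np.random.default_rng(seed)
ds_subset = ds.select([0, 2])                           # builds an indices mapping, no data copy
splits = ds.train_test_split(test_size=0.25, seed=42)   # DatasetDict with "train" and "test" splits
shard0 = ds.shard(num_shards=2, index=0)                # rows whose index mod 2 == 0 (contiguous=False)
ds.to_csv("example.csv")                                # export written through CsvDatasetWriter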
[
[
"tensorflow.ensure_shape",
"numpy.issubdtype",
"numpy.dtype",
"tensorflow.data.Dataset.from_generator",
"numpy.random.default_rng",
"tensorflow.train.Int64List",
"numpy.arange",
"tensorflow.shape",
"tensorflow.train.BytesList",
"tensorflow.train.FloatList",
"tensorflow.train.Features",
"tensorflow.py_function",
"numpy.array",
"tensorflow.dtypes.as_dtype",
"numpy.random.get_state",
"numpy.random.random",
"tensorflow.reshape",
"tensorflow.data.experimental.TFRecordWriter",
"tensorflow.TensorSpec"
]
] |
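As a companion to the API list above, a minimal sketch (assuming TensorFlow is installed) of how the listed tf.train.* and tf.data APIs combine to write a TFRecord file, mirroring the Dataset.export() helper shown in the code field; the feature names and values below are made-up placeholders.

import tensorflow as tf

def _bytes_feature(values):
    # bytes_list from a list of byte strings
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))

def _float_feature(values):
    # float_list from a list of floats
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))

def _int64_feature(values):
    # int64_list from a list of ints
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

def serialize_example(text, score, label):
    # Build one tf.train.Example from a dict of typed features and serialize it.
    feature = {
        "text": _bytes_feature([text.encode()]),
        "score": _float_feature([score]),
        "label": _int64_feature([label]),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()

def generator():
    # Yield already-serialized protobuf strings, one per example.
    for i in range(3):
        yield serialize_example(f"example {i}", i / 2.0, i)

tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
writer = tf.data.experimental.TFRecordWriter("example.tfrecord")
writer.write(tf_dataset)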
fxnnxc/mdpi_drone_control
|
[
"94b46449692b4a4c7c3777e8aa07edf6ed6842f5"
] |
[
"env.py"
] |
[
"from operator import mul\nfrom pickle import NONE\nimport warnings\nfrom drone.control.BaseControl import BaseControl\nfrom drone.envs.WindAviary import WindAviary\nfrom drone.train.predict_model.model import Predictor\nfrom ray.rllib.env.multi_agent_env import MultiAgentEnv \nimport numpy as np \nfrom gym.spaces import Discrete, Box\nimport torch \nimport copy\nfrom scipy.stats import mode \n\nclass RLCorrectionEnv(WindAviary, MultiAgentEnv):\n def __init__(self, config={}):\n np.random.seed(123)\n\n d = 1.0\n super().__init__(wind_magnitude = config.get(\"wind_magnitude\",0.04), \n wind_field=config.get(\"wind_field\"),\n freq=240, \n initial_xyzs=np.array([[1,0,1]]), \n num_drones=1, \n gui=config.get(\"gui\", None), \n trajectory=config.get(\"trajectory\", [np.array([1,1,1]), np.array([1-d,1,1]), np.array([1-d, 1-d, 1]), np.array([1, 1-d , 1]), np.array([1,1,1])]),\n trajectory_epsilon=config.get(\"trajectory_epsilon\", 0.05),\n trajectory_num_in_sec=config.get(\"trajectory_num_in_sec\", 0.3),\n debug_mode=config.get(\"debug_mode\"))\n \n self.action_space = self._actionSpace()\n self.observation_space = self._observationSpace()\n self.EPISODE_LEN_SEC = config.get(\"EPISODE_LEN_SEC\", 50)\n \n self.predict_model = torch.load(config.get(\"predict_model_path\", \"C:/Users/sail/Desktop/git/mdpi_drone_paper/drone/train/predict_model/predictor.pt\"))\n self.predict_model.eval()\n \n self.init_position_freq_ratio = config.get(\"position_freq_ratio\", 0.3)\n self.ctrl = BaseControl(self.DRONE_MODEL, gamma=self.init_position_freq_ratio)\n self.reset_counter = 0 \n self.h_mode_list = [] \n self.config = config \n\n def _observationSpace(self):\n return Box(-np.inf, np.inf, shape=(26,))\n\n def _actionSpace(self):\n return Discrete(5)\n\n def reset(self):\n self.reset_counter+=1\n self.INIT_XYZS = np.array([self.trajectory[0]]) \n self.ctrl = BaseControl(self.DRONE_MODEL, gamma=self.init_position_freq_ratio)\n obs = super().reset()\n # self.trajectory = self.trajectory + self.trajectory\n self.wind_error = np.zeros(3)\n self.step_counter = 0\n self.done_condition = None \n \n #return {0:self._obs()}\n self.h = np.random.randint(1, 240)//10 * 10\n self.positions_for_store = [] \n self.infos = [] \n obs_ = [0 for _ in range(26)]\n self.h_list = []\n if self.reset_counter ==1:\n self.wind_field = self.config.get(\"wind_field\")\n else:\n self.wind_field = lambda pos: self.config.get(\"wind_field\")(pos) + (np.random.random(3,)-0.5)/20\n\n return {0:obs_}\n\n def step(self, action):\n # --- compute the action\n action = action[0]\n if action==0:\n action = -1 \n elif action ==2:\n action = 1\n else:\n action = 0\n self.ctrl.h = max(2, self.ctrl.h + action * self.ctrl.H // 10)\n self.ctrl.h = np.clip(self.ctrl.h, 1, self.ctrl.H) \n self.h_list.append(self.ctrl.h)\n \n control_obs = []\n wind_errors = []\n\n done={\"__all__\":False}\n # self.ctrl.control_counter = 0\n for i in range(self.ctrl.H):\n # --- predict the error of PID before wind effect\n self.position_before_control = self._getDroneStateVector(0)[:3]\n with torch.no_grad():\n relative_sp = self.sp - self.position_before_control\n model_input = Predictor.get_model_input_from_env(self)\n # model_input = self.predict_model.get_model_input_from_env(self)\n self.predicted_position_difference = self.predict_model(torch.tensor(model_input, dtype=torch.float32)).detach().numpy().reshape(3,)\n\n state, reward, done, info = super().step(self.sp)\n\n self.wind_error = (self._getDroneStateVector(0)[:3] - self.position_before_control) - 
self.predicted_position_difference\n self.wind_error = np.zeros(3,) #self.get_wind_direction(0)\n\n wind_errors.append(self.wind_error)\n current_state = self._getDroneStateVector(0)\n current_state[:3] = 0 \n control_obs.append(current_state)\n self.positions_for_store.append(self.current_position)\n self.infos.append(self._getDroneStateVector(0).copy())\n \n done = {0:self._computeDone()}\n done['__all__'] = self._computeDone()\n if done[0]:\n break \n # --- pooling the control observation ---\n control_obs = self.pooling(control_obs, wind_errors)\n\n if done['__all__']:\n if self.done_condition == 'die':\n reward = -2\n elif self.done_condition ==\"reach_all\":\n reward = (self.SIM_FREQ*self.EPISODE_LEN_SEC)/(self.step_counter)\n else:\n reward = -1\n print(self.h_list, end=\" : \")\n print(reward)\n self.h_mode_list.append(mode(self.h_list)[0][0])\n self.h_mode_list.append(np.mean(self.h_list))\n \n # np.save(\"../../../../../../../analysis/data/2h_mode.npy\", np.array(self.h_mode_list))\n if self.reset_counter%1==0: \n name = \"wo\"\n np.save(f\"../../../../../../../analysis/data/1_position_{name}QFP.npy\", np.array(self.positions_for_store))\n np.save(f\"../../../../../../../analysis/data/1_state_{name}QFP.npy\", np.array(self.infos))\n else:\n reward = 0 \n self.step_counter += 1\n return {0:control_obs}, {0:reward}, done, info\n\n def pooling(self, control_obs, wind_errors):\n # return the pooled control observation \n pooled = np.mean(control_obs, 0) # mean pooling over the timesteps.\n if self.wind_magnitude !=0:\n mean, std = np.mean(wind_errors), np.std(wind_errors)\n else:\n mean, std = 0,0\n pooled = np.append(pooled, [mean, std, self.ctrl.h/self.ctrl.H])\n if self.trajectory_position < len(self.trajectory)-1:\n next_way_points = (self.trajectory[self.trajectory_position+1:,:] -self.current_position)\n for i in range(next_way_points.shape[0]):\n next_way_points[i,:] *= 1/(1+i)\n pooled = np.append(pooled, next_way_points.mean(axis=0))\n else:\n pooled = np.append(pooled, [0,0,0])\n return pooled\n\n # def _obs(self):\n # # wind error + drone kinematic information + current ratio\n # new_state = np.hstack([self.wind_error, self._getDroneStateVector(0), np.array(self.ctrl.gamma)])\n # return new_state\n\n def get_predictor_input(self, relatvie_sp):\n # kinematic information without current position \n # + relative setpoint \n # + current pid position error\n cur_state = self._getDroneStateVector(0)[3:].reshape(1,-1)\n pos_error = self.ctrl.pos_err.reshape(1, -1)\n relatvie_sp = relatvie_sp.reshape(1,-1)\n ratio = np.array([self.ctrl.gamma])\n if len(ratio.shape)==1:\n ratio = np.expand_dims(ratio, 0)\n\n x = np.hstack([cur_state, relatvie_sp, pos_error, ratio])\n\n return x\n\n def _computeDone(self):\n done=False\n if self._getDroneStateVector(0)[2] < 0.1:\n done = True\n self.done_condition = 'die'\n if len(self.trajectory) == len(self.trajectory_timesteps):\n self.done_condition = 'reach_all'\n done =True\n if self.step_counter >= self.EPISODE_LEN_SEC * self.SIM_FREQ:\n done=True \n return done\n\n\n\nif __name__ == \"__main__\":\n import pybullet as p \n trajectory_ = [np.array([1/2*(-1)**(i%2),1/2*(-1)**(i%2),1]) for i in range(10) ]\n trajectory_ = [np.array([((-1)**i)/5, ((-1)**i)*np.cos(i)+np.random.random(), 2+ np.sin(i/18)+ np.cos(i/8)]) for i in range(5)]\n d = 1.0\n trajectory1 = [np.array([1,1,1]), np.array([1-d,1,1]), np.array([1-d, 1-d, 1]), np.array([1, 1-d , 1]), np.array([1,1,1])]\n trajectory2 = [np.array([1,1,1]), np.array([1+d,1,1]), np.array([1,1,1]), 
np.array([1+d,1,1]), np.array([1,1,1])]\n trajectory3 = [np.array([1,1,1]), np.array([-1,1+d,1]), np.array([-1, 1+2*d, 1+d]), np.array([1, 1+3*d, 1+2*d]), np.array([1,1+3*d, 1+3*d])]\n trajectory4 = [np.array([1,1.2,1]), np.array([1,1,1+d]), np.array([1,1,1]), np.array([1,1,1+d]), np.array([1,1,1])]\n random_int_trajectory = np.array([[np.random.randint(-3,3), np.random.randint(-3,3), np.random.randint(1,5)] for i in range(np.random.randint(6,7))])\n \n print(\"----Trajectory-------\")\n print(random_int_trajectory)\n print(\"LEN TRAJECTORY: %d\"%len(random_int_trajectory))\n print(\"--------------------\")\n\n random_max_dist = np.random.random()\n for num in [1,2,3,4]:\n print(\"WAY POINT IN SEC: %d\"%num, end= \" | \")\n\n for t in [0.1, 0.4 , 0.5, 0.99, 1.0]:\n env = RLCorrectionEnv({\"gui\": False, \n \"wind_magnitude\":0.04, \n \"predict_model_path\":\"../../predict_model/predictor.pt\", \n \"use_prediction\":True,\n #\"trajectory\": np.array(random_int_trajectory),\n \"trajectory\": np.array(trajectory1),\n \"wind_schedule\":[np.array([(-1)**(i%2),(-1)**(i%2),1]) for i in range(480)],\n \"trajectory_epsilon\":0.05,\n \"EPISODE_LEN_SEC\":50,\n \"position_freq_ratio\":t,\n \"trajectory_num_in_sec\":num})\n env = RLCorrectionEnv({\"gui\": False, \n \n \n #\"position_freq_ratio\":t,\n \"trajectory_num_in_sec\":num})\n if t==0.05:\n print(\"LEN TRAJECTORY: %d\"%len(env.trajectory))\n env.reset()\n print(env.ctrl.h)\n summ = 0 \n count = 0\n total_reward = 0 \n while True:\n action = env.action_space.sample()\n #action = 1\n state, reward, done, info = env.step({0:action})\n total_reward += reward[0]\n if env.wind_error is not None:\n summ += np.linalg.norm(env.wind_error)\n count += 1\n if done['__all__'] is True:\n break \n # print(\"action: %d | postion hertz: %d\"%(action, env.ctrl.h), end=\" | \")\n # print(\"pos:\", env._getDroneStateVector(0)[:3])\n \n print(\"positon control hertz : \", env.ctrl.h,\" | reward:%3.6f\"%total_reward, \" | reach time:\", env.trajectory_timesteps)\n"
] |
[
[
"numpy.hstack",
"numpy.expand_dims",
"numpy.random.random",
"numpy.random.seed",
"numpy.clip",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"torch.tensor",
"numpy.append",
"numpy.std",
"numpy.mean",
"torch.no_grad",
"scipy.stats.mode",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
nfi/edgepipes
|
[
"6fe4826949f44583788a2ec8da8f4d64404db1f1"
] |
[
"handtracker/hand_tracker.py"
] |
[
"import csv\nimport cv2\nimport numpy as np\nimport tensorflow as tf\n\ndef draw_hand(frame, kp):\n # the points where we go back to the first midpoint of the hand\n clearpoints = [4,8,12,16,20]\n lk = None\n p = 0\n\n # Draw the hand\n for keypoint in kp:\n if lk is not None:\n cv2.line(frame, (int(keypoint[0]),int(keypoint[1])),(int(lk[0]),int(lk[1])), (255,0,255), 2)\n lk = keypoint\n cv2.circle(frame, (int(keypoint[0]), int(keypoint[1])), 3, (0,255,255), -1)\n if p in clearpoints:\n lk = kp[0]\n p = p + 1\n\ndef draw_box(frame, box):\n # draw the box\n for i in range(0,4):\n cv2.line(frame, (int(box[i][0]),int(box[i][1])),(int(box[(i+1)&3][0]),int(box[(i+1)&3][1])), (255,255,255), 2)\n\n\nclass HandTracker():\n r\"\"\"\n Class to use Google's Mediapipe HandTracking pipeline from Python.\n So far only detection of a single hand is supported.\n Any any image size and aspect ratio supported.\n\n Args:\n palm_model: path to the palm_detection.tflite\n joint_model: path to the hand_landmark.tflite\n anchors_path: path to the csv containing SSD anchors\n Ourput:\n (21,2) array of hand joints.\n Examples::\n >>> det = HandTracker(path1, path2, path3)\n >>> input_img = np.random.randint(0,255, 256*256*3).reshape(256,256,3)\n >>> keypoints, bbox = det(input_img)\n \"\"\"\n\n def __init__(self, palm_model, joint_model, anchors_path,\n box_enlarge=1.5, box_shift=0.2):\n self.box_shift = box_shift\n self.box_enlarge = box_enlarge\n\n self.interp_palm = tf.lite.Interpreter(palm_model)\n self.interp_palm.allocate_tensors()\n self.interp_joint = tf.lite.Interpreter(joint_model)\n self.interp_joint.allocate_tensors()\n \n # reading the SSD anchors\n with open(anchors_path, \"r\") as csv_f:\n self.anchors = np.r_[\n [x for x in csv.reader(csv_f, quoting=csv.QUOTE_NONNUMERIC)]\n ]\n # reading tflite model paramteres\n output_details = self.interp_palm.get_output_details()\n input_details = self.interp_palm.get_input_details()\n \n self.in_idx = input_details[0]['index']\n self.out_reg_idx = output_details[0]['index']\n self.out_clf_idx = output_details[1]['index']\n \n self.in_idx_joint = self.interp_joint.get_input_details()[0]['index']\n self.out_idx_joint = self.interp_joint.get_output_details()[0]['index']\n\n # 90° rotation matrix used to create the alignment trianlge \n self.R90 = np.r_[[[0,1],[-1,0]]]\n\n # trianlge target coordinates used to move the detected hand\n # into the right position\n self._target_triangle = np.float32([\n [128, 128],\n [128, 0],\n [ 0, 128]\n ])\n self._target_box = np.float32([\n [ 0, 0, 1],\n [256, 0, 1],\n [256, 256, 1],\n [ 0, 256, 1],\n ])\n \n def _get_triangle(self, kp0, kp2, dist=1):\n \"\"\"get a triangle used to calculate Affine transformation matrix\"\"\"\n\n dir_v = kp2 - kp0\n dir_v /= np.linalg.norm(dir_v)\n\n dir_v_r = dir_v @ self.R90.T\n return np.float32([kp2, kp2+dir_v*dist, kp2 + dir_v_r*dist])\n\n @staticmethod\n def _triangle_to_bbox(source):\n # plain old vector arithmetics\n bbox = np.c_[\n [source[2] - source[0] + source[1]],\n [source[1] + source[0] - source[2]],\n [3 * source[0] - source[1] - source[2]],\n [source[2] - source[1] + source[0]],\n ].reshape(-1,2)\n return bbox\n \n @staticmethod\n def _im_normalize(img):\n return np.ascontiguousarray(\n 2 * ((img / 255) - 0.5\n ).astype('float32'))\n \n @staticmethod\n def _sigm(x):\n return 1 / (1 + np.exp(-x) )\n \n @staticmethod\n def _pad1(x):\n return np.pad(x, ((0,0),(0,1)), constant_values=1, mode='constant')\n \n \n def predict_joints(self, img_norm):\n self.interp_joint.set_tensor(\n 
self.in_idx_joint, img_norm.reshape(1,256,256,3))\n self.interp_joint.invoke()\n\n joints = self.interp_joint.get_tensor(self.out_idx_joint)\n return joints.reshape(-1,2)\n\n def detect_hand(self, img_norm):\n assert -1 <= img_norm.min() and img_norm.max() <= 1,\\\n \"img_norm should be in range [-1, 1]\"\n assert img_norm.shape == (256, 256, 3),\\\n \"img_norm shape must be (256, 256, 3)\"\n\n # predict hand location and 7 initial landmarks\n self.interp_palm.set_tensor(self.in_idx, img_norm[None])\n self.interp_palm.invoke()\n\n out_reg = self.interp_palm.get_tensor(self.out_reg_idx)[0]\n out_clf = self.interp_palm.get_tensor(self.out_clf_idx)[0,:,0]\n\n # finding the best prediction\n # TODO: replace it with non-max suppression\n detecion_mask = self._sigm(out_clf) > 0.7\n candidate_detect = out_reg[detecion_mask]\n candidate_anchors = self.anchors[detecion_mask]\n\n if candidate_detect.shape[0] == 0:\n print(\"No hands found\")\n return None, None\n # picking the widest suggestion while NMS is not implemented\n max_idx = np.argmax(candidate_detect[:, 3])\n\n # bounding box offsets, width and height\n dx,dy,w,h = candidate_detect[max_idx, :4]\n center_wo_offst = candidate_anchors[max_idx,:2] * 256\n \n # 7 initial keypoints\n keypoints = center_wo_offst + candidate_detect[max_idx,4:].reshape(-1,2)\n side = max(w,h) * self.box_enlarge\n \n # now we need to move and rotate the detected hand for it to occupy a\n # 256x256 square\n # line from wrist keypoint to middle finger keypoint\n # should point straight up\n # TODO: replace triangle with the bbox directly\n source = self._get_triangle(keypoints[0], keypoints[2], side)\n source -= (keypoints[0] - keypoints[2]) * self.box_shift\n return source, keypoints\n\n def preprocess_img(self, img):\n # fit the image into a 256x256 square\n shape = np.r_[img.shape]\n pad = (shape.max() - shape[:2]).astype('uint32') // 2\n img_pad = np.pad(\n img,\n ((pad[0],pad[0]), (pad[1],pad[1]), (0,0)),\n mode='constant')\n img_small = cv2.resize(img_pad, (256, 256))\n img_small = np.ascontiguousarray(img_small)\n \n img_norm = self._im_normalize(img_small)\n return img_pad, img_norm, pad\n\n\n def __call__(self, img):\n img_pad, img_norm, pad = self.preprocess_img(img)\n \n source, keypoints = self.detect_hand(img_norm)\n if source is None:\n return None, None\n\n # calculating transformation from img_pad coords\n # to img_landmark coords (cropped hand image)\n scale = max(img.shape) / 256\n Mtr = cv2.getAffineTransform(\n source * scale,\n self._target_triangle\n )\n \n img_landmark = cv2.warpAffine(\n self._im_normalize(img_pad), Mtr, (256,256)\n )\n \n joints = self.predict_joints(img_landmark)\n \n # adding the [0,0,1] row to make the matrix square\n Mtr = self._pad1(Mtr.T).T\n Mtr[2,:2] = 0\n\n Minv = np.linalg.inv(Mtr)\n\n # projecting keypoints back into original image coordinate space\n kp_orig = (self._pad1(joints) @ Minv.T)[:,:2]\n box_orig = (self._target_box @ Minv.T)[:,:2]\n kp_orig -= pad[::-1]\n box_orig -= pad[::-1]\n \n return kp_orig, box_orig\n"
] |
[
[
"numpy.pad",
"numpy.ascontiguousarray",
"tensorflow.lite.Interpreter",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.float32",
"numpy.exp"
]
] |
GVRX/potentiostat
|
[
"1bb44639180ad6d81697631d4d5f699e6fb4eef1"
] |
[
"software/python/potentiostat/examples/run_constant_w_plot.py"
] |
[
"from potentiostat import Potentiostat\nimport matplotlib.pyplot as plt\n\nport = '/dev/ttyACM0' # Serial port for potentiostat device\ndatafile = 'data.txt' # Name of output data file\n\ntest_name = 'constant' # Name of test to run - constant volate voltammetery\ncurr_range = '100uA' # Name of current range for test [-10uA, +10uA]\nsample_rate = 100.0 # Rate (samples/sec) at which to collect samples \n\ntest_param = { \n 'quietValue' : 0.0, # Output voltage during quiet peroid\n 'quietTime' : 1000, # Duration of quiet period (ms)\n 'value' : 2.5, # Output volatage (V) durring constant voltage test\n 'duration' : 4000, # Duration of constant voltage test (ms)\n }\n\n# Create Device object and set sample rate, current range and test parameters\ndev = Potentiostat(port) \ndev.set_sample_rate(sample_rate) \ndev.set_curr_range(curr_range) \ndev.set_param(test_name,test_param)\n\n# Run cyclic voltammetry test\nt,volt,curr = dev.run_test(test_name,display='pbar',filename=datafile)\n\n# plot results using matplotlib\nplt.subplot(211)\nplt.title('Voltage and current vs time')\nplt.plot(t,volt)\nplt.ylabel('potential (V)')\nplt.ylim(0,test_param['value']*1.1)\nplt.grid('on')\n\nplt.subplot(212)\nplt.plot(t,curr)\nplt.ylabel('current (uA)')\nplt.xlabel('time (sec)')\nplt.grid('on')\n\nplt.show()\n\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
justinphan3110/Scweet
|
[
"9d61121f95f854cce58bc3c16b8fc06b20e530a6"
] |
[
"Scweet/utils.py"
] |
[
"from io import StringIO, BytesIO\nimport os\nimport re\nfrom time import sleep\nimport random\nimport chromedriver_autoinstaller\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport datetime\nimport pandas as pd\nimport platform\nfrom selenium.webdriver.common.keys import Keys\n# import pathlib\n\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom . import const\nimport urllib\nfrom fake_useragent import UserAgent\nfrom selenium.webdriver.common.by import By\nfrom .const import get_username, get_password, get_email\n\n\n# current_dir = pathlib.Path(__file__).parent.absolute()\n\ndef get_data(card, save_images=False, save_dir=None):\n \"\"\"Extract data from tweet card\"\"\"\n image_links = []\n\n try:\n username = card.find_element_by_xpath('.//span').text\n except:\n return\n\n try:\n handle = card.find_element_by_xpath('.//span[contains(text(), \"@\")]').text\n except:\n return\n\n try:\n postdate = card.find_element_by_xpath('.//time').get_attribute('datetime')\n except:\n return\n\n try:\n text = card.find_element_by_xpath('.//div[2]/div[2]/div[1]').text\n except:\n text = \"\"\n\n try:\n embedded = card.find_element_by_xpath('.//div[2]/div[2]/div[2]').text\n except:\n embedded = \"\"\n\n # text = comment + embedded\n\n try:\n reply_cnt = card.find_element_by_xpath('.//div[@data-testid=\"reply\"]').text\n except:\n reply_cnt = 0\n\n try:\n retweet_cnt = card.find_element_by_xpath('.//div[@data-testid=\"retweet\"]').text\n except:\n retweet_cnt = 0\n\n try:\n like_cnt = card.find_element_by_xpath('.//div[@data-testid=\"like\"]').text\n except:\n like_cnt = 0\n\n try:\n elements = card.find_elements_by_xpath('.//div[2]/div[2]//img[contains(@src, \"https://pbs.twimg.com/\")]')\n for element in elements:\n image_links.append(element.get_attribute('src'))\n except:\n image_links = []\n\n # if save_images == True:\n #\tfor image_url in image_links:\n #\t\tsave_image(image_url, image_url, save_dir)\n # handle promoted tweets\n\n try:\n promoted = card.find_element_by_xpath('.//div[2]/div[2]/[last()]//span').text == \"Promoted\"\n except:\n promoted = False\n if promoted:\n return\n\n # get a string of all emojis contained in the tweet\n try:\n emoji_tags = card.find_elements_by_xpath('.//img[contains(@src, \"emoji\")]')\n except:\n return\n emoji_list = []\n for tag in emoji_tags:\n try:\n filename = tag.get_attribute('src')\n emoji = chr(int(re.search(r'svg\\/([a-z0-9]+)\\.svg', filename).group(1), base=16))\n except AttributeError:\n continue\n if emoji:\n emoji_list.append(emoji)\n emojis = ' '.join(emoji_list)\n\n # tweet url\n try:\n element = card.find_element_by_xpath('.//a[contains(@href, \"/status/\")]')\n tweet_url = element.get_attribute('href')\n except:\n return\n\n tweet = (\n username, handle, postdate, text, embedded, emojis, reply_cnt, retweet_cnt, like_cnt, image_links, tweet_url)\n return tweet\n\n\ndef init_driver(headless=True, proxy=None, show_images=False, option=None):\n \"\"\" initiate a chromedriver instance \n --option : other option to add (str)\n \"\"\"\n\n # create instance of web driver\n chromedriver_path = chromedriver_autoinstaller.install()\n # options\n options = Options()\n if headless is True:\n print(\"Scraping on headless mode.\")\n options.add_argument('--disable-gpu')\n options.headless = True\n else:\n options.headless = 
False\n options.add_argument('log-level=3')\n if proxy is not None:\n options.add_argument('--proxy-server=%s' % proxy)\n print(\"using proxy : \", proxy)\n if show_images == False:\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n options.add_experimental_option(\"prefs\", prefs)\n if option is not None:\n options.add_argument(option)\n\n userAgent = UserAgent().random\n\n options = webdriver.ChromeOptions()\n options.add_argument('-headless')\n options.add_argument('-no-sandbox')\n options.add_argument('-disable-dev-shm-usage')\n options.add_argument(f'user-agent={userAgent}')\n\n wd = webdriver.Chrome('chromedriver',options=options)\n wd.set_page_load_timeout(100)\n return wd\n\n\ndef log_search_page(driver, since, until_local, lang, display_type, words, to_account, from_account, mention_account,\n hashtag, filter_replies, proximity,\n geocode, minreplies, minlikes, minretweets):\n \"\"\" Search for this query between since and until_local\"\"\"\n # format the <from_account>, <to_account> and <hash_tags>\n from_account = \"(from%3A\" + from_account + \")%20\" if from_account is not None else \"\"\n to_account = \"(to%3A\" + to_account + \")%20\" if to_account is not None else \"\"\n mention_account = \"(%40\" + mention_account + \")%20\" if mention_account is not None else \"\"\n hash_tags = \"(%23\" + hashtag + \")%20\" if hashtag is not None else \"\"\n\n if words is not None:\n if len(words) == 1:\n words = \"(\" + str(''.join(words)) + \")%20\"\n else:\n words = \"(\" + str('%20OR%20'.join(words)) + \")%20\"\n else:\n words = \"\"\n\n if lang is not None:\n lang = 'lang%3A' + lang\n else:\n lang = \"\"\n\n until_local = \"until%3A\" + until_local + \"%20\"\n since = \"since%3A\" + since + \"%20\"\n\n if display_type == \"Latest\" or display_type == \"latest\":\n display_type = \"&f=live\"\n elif display_type == \"Image\" or display_type == \"image\":\n display_type = \"&f=image\"\n else:\n display_type = \"\"\n\n # filter replies \n if filter_replies == True:\n filter_replies = \"%20-filter%3Areplies\"\n else:\n filter_replies = \"\"\n # geo\n if geocode is not None:\n geocode = \"%20geocode%3A\" + geocode\n else:\n geocode = \"\"\n # min number of replies\n if minreplies is not None:\n minreplies = \"%20min_replies%3A\" + str(minreplies)\n else:\n minreplies = \"\"\n # min number of likes\n if minlikes is not None:\n minlikes = \"%20min_faves%3A\" + str(minlikes)\n else:\n minlikes = \"\"\n # min number of retweets\n if minretweets is not None:\n minretweets = \"%20min_retweets%3A\" + str(minretweets)\n else:\n minretweets = \"\"\n\n # proximity\n if proximity == True:\n proximity = \"&lf=on\" # at the end\n else:\n proximity = \"\"\n\n path = 'https://twitter.com/search?q=' + words + from_account + to_account + mention_account + hash_tags + until_local + since + lang + filter_replies + geocode + minreplies + minlikes + minretweets + '&src=typed_query' + display_type + proximity\n driver.get(path)\n return path\n\n\ndef get_last_date_from_csv(path):\n df = pd.read_csv(path)\n return datetime.datetime.strftime(max(pd.to_datetime(df[\"Timestamp\"])), '%Y-%m-%dT%H:%M:%S.000Z')\n\n\ndef log_in(driver, env, timeout=20, wait=4):\n email = get_email(env) # const.EMAIL\n password = get_password(env) # const.PASSWORD\n username = get_username(env) # const.USERNAME\n\n driver.get('https://twitter.com/i/flow/login')\n\n email_xpath = '//input[@autocomplete=\"username\"]'\n password_xpath = '//input[@autocomplete=\"current-password\"]'\n username_xpath = 
'//input[@data-testid=\"ocfEnterTextTextInput\"]'\n\n sleep(random.uniform(wait, wait + 1))\n\n # enter email\n email_el = driver.find_element_by_xpath(email_xpath)\n sleep(random.uniform(wait, wait + 1))\n email_el.send_keys(email)\n sleep(random.uniform(wait, wait + 1))\n email_el.send_keys(Keys.RETURN)\n sleep(random.uniform(wait, wait + 1))\n # in case twitter spotted unusual login activity : enter your username\n if check_exists_by_xpath(username_xpath, driver):\n username_el = driver.find_element_by_xpath(username_xpath)\n sleep(random.uniform(wait, wait + 1))\n username_el.send_keys(username)\n sleep(random.uniform(wait, wait + 1))\n username_el.send_keys(Keys.RETURN)\n sleep(random.uniform(wait, wait + 1))\n # enter password\n password_el = driver.find_element_by_xpath(password_xpath)\n password_el.send_keys(password)\n sleep(random.uniform(wait, wait + 1))\n password_el.send_keys(Keys.RETURN)\n sleep(random.uniform(wait, wait + 1))\n\n\ndef keep_scroling(driver, data, writer, tweet_ids, scrolling, tweet_parsed, limit, scroll, last_position,\n save_images=False):\n \"\"\" scrolling function for tweets crawling\"\"\"\n\n save_images_dir = \"/images\"\n\n if save_images == True:\n if not os.path.exists(save_images_dir):\n os.mkdir(save_images_dir)\n\n while scrolling and tweet_parsed < limit:\n sleep(random.uniform(0.5, 1.5))\n # get the card of tweets\n page_cards = driver.find_elements_by_xpath('//article[@data-testid=\"tweet\"]') # changed div by article\n for card in page_cards:\n tweet = get_data(card, save_images, save_images_dir)\n if tweet:\n # check if the tweet is unique\n tweet_id = ''.join(tweet[:-2])\n if tweet_id not in tweet_ids:\n tweet_ids.add(tweet_id)\n data.append(tweet)\n last_date = str(tweet[2])\n print(\"Tweet made at: \" + str(last_date) + \" is found.\")\n writer.writerow(tweet)\n tweet_parsed += 1\n if tweet_parsed >= limit:\n break\n scroll_attempt = 0\n while tweet_parsed < limit:\n # check scroll position\n scroll += 1\n print(\"scroll \", scroll)\n sleep(random.uniform(0.5, 1.5))\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n curr_position = driver.execute_script(\"return window.pageYOffset;\")\n if last_position == curr_position:\n scroll_attempt += 1\n # end of scroll region\n if scroll_attempt >= 2:\n scrolling = False\n break\n else:\n sleep(random.uniform(0.5, 1.5)) # attempt another scroll\n else:\n last_position = curr_position\n break\n return driver, data, writer, tweet_ids, scrolling, tweet_parsed, scroll, last_position\n\n\ndef get_users_follow(users, headless, env, follow=None, verbose=1, wait=2, limit=float('inf')):\n \"\"\" get the following or followers of a list of users \"\"\"\n\n # initiate the driver\n driver = init_driver(headless=headless)\n sleep(wait)\n # log in (the .env file should contain the username and password)\n # driver.get('https://www.twitter.com/login')\n log_in(driver, env, wait=wait)\n sleep(wait)\n # followers and following dict of each user\n follows_users = {}\n\n for user in users:\n # if the login fails, find the new log in button and log in again.\n if check_exists_by_link_text(\"Log in\", driver):\n print(\"Login failed. 
Retry...\")\n login = driver.find_element_by_link_text(\"Log in\")\n sleep(random.uniform(wait - 0.5, wait + 0.5))\n driver.execute_script(\"arguments[0].click();\", login)\n sleep(random.uniform(wait - 0.5, wait + 0.5))\n sleep(wait)\n log_in(driver, env)\n sleep(wait)\n # case 2 \n if check_exists_by_xpath('//input[@name=\"session[username_or_email]\"]', driver):\n print(\"Login failed. Retry...\")\n sleep(wait)\n log_in(driver, env)\n sleep(wait)\n print(\"Crawling \" + user + \" \" + follow)\n driver.get('https://twitter.com/' + user + '/' + follow)\n sleep(random.uniform(wait - 0.5, wait + 0.5))\n # check if we must keep scrolling\n scrolling = True\n last_position = driver.execute_script(\"return window.pageYOffset;\")\n follows_elem = []\n follow_ids = set()\n is_limit = False\n while scrolling and not is_limit:\n # get the card of following or followers\n # this is the primaryColumn attribute that contains both followings and followers\n primaryColumn = driver.find_element_by_xpath('//div[contains(@data-testid,\"primaryColumn\")]')\n # extract only the Usercell\n page_cards = primaryColumn.find_elements_by_xpath('//div[contains(@data-testid,\"UserCell\")]')\n for card in page_cards:\n # get the following or followers element\n element = card.find_element_by_xpath('.//div[1]/div[1]/div[1]//a[1]')\n follow_elem = element.get_attribute('href')\n # append to the list\n follow_id = str(follow_elem)\n follow_elem = '@' + str(follow_elem).split('/')[-1]\n if follow_id not in follow_ids:\n follow_ids.add(follow_id)\n follows_elem.append(follow_elem)\n if len(follows_elem) >= limit:\n is_limit = True\n break\n if verbose:\n print(follow_elem)\n print(\"Found \" + str(len(follows_elem)) + \" \" + follow)\n scroll_attempt = 0\n while not is_limit:\n sleep(random.uniform(wait - 0.5, wait + 0.5))\n driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n sleep(random.uniform(wait - 0.5, wait + 0.5))\n curr_position = driver.execute_script(\"return window.pageYOffset;\")\n if last_position == curr_position:\n scroll_attempt += 1\n # end of scroll region\n if scroll_attempt >= 2:\n scrolling = False\n break\n else:\n sleep(random.uniform(wait - 0.5, wait + 0.5)) # attempt another scroll\n else:\n last_position = curr_position\n break\n\n follows_users[user] = follows_elem\n\n return follows_users\n\n\ndef check_exists_by_link_text(text, driver):\n try:\n driver.find_element_by_link_text(text)\n except NoSuchElementException:\n return False\n return True\n\n\ndef check_exists_by_xpath(xpath, driver):\n timeout = 3\n try:\n driver.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True\n\n\ndef dowload_images(urls, save_dir):\n for i, url_v in enumerate(urls):\n for j, url in enumerate(url_v):\n urllib.request.urlretrieve(url, save_dir + '/' + str(i + 1) + '_' + str(j + 1) + \".jpg\")\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
JayMan91/e2e-model-learning
|
[
"acb6b071d9646c31de4b7f45d8dd13fe3c206c5b"
] |
[
"newsvendor/main.py"
] |
[
"#!/usr/bin/env python3\n\nimport argparse\n\nimport cvxpy as cp\nimport numpy as np\n\nimport os\n\nfrom datetime import datetime\n\nimport importlib\ntry: import setGPU\nexcept ImportError: pass\n\nimport torch\nfrom torch.autograd import Variable\n\nimport setproctitle\n\nimport mle, mle_net, policy_net, task_net, plot\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Run newsvendor task net experiments.')\n parser.add_argument('--save', type=str, required=True, \n metavar='save-folder', help='save folder path')\n parser.add_argument('--nRuns', type=int, default=10,\n metavar='runs', help='number of runs')\n parser.add_argument('--trueModel', type=str, \n choices=['linear', 'nonlinear', 'both'], default='both', \n help='true y|x distribution')\n args = parser.parse_args()\n\n setproctitle.setproctitle('pdonti.' + args.save + args.trueModel)\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n\n # Cost params for newsvendor task loss\n params = init_newsvendor_params()\n\n true_model_types = ['linear', 'nonlinear'] if args.trueModel == 'both' else [args.trueModel]\n\n for true_model in true_model_types:\n\n save_folder = os.path.join(args.save, true_model)\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n results_file = os.path.join(save_folder, 'inventory_results.csv')\n\n # Randomly generate true params for p(y|x;\\theta).\n # Set with_seed=True to replicate paper true params.\n Theta_true_lin, Theta_true_sq = init_theta_true(\n params, is_linear=(true_model == 'linear'), with_seed=True)\n\n # Test data. Set with_seed=True to replicate paper test data.\n X_test, Y_test = gen_data(1000, params, Theta_true_lin, Theta_true_sq,\n with_seed=True)\n\n # MLE with true params\n f_eval_mle_t, z_buy_t, f_opt_t = mle.newsvendor_eval(\n X_test, Y_test, Theta_true_lin, Theta_true_sq, params)\n print(np.mean(f_eval_mle_t))\n mle_true_score = np.mean(f_eval_mle_t)\n\n with open(results_file, 'w') as f:\n f.write('{},{}\\n'.format('mle_true:', mle_true_score))\n f.write('{},{},{},{},{},{},{}\\n'.format(\n 'm', 'mle-linear', 'mle-nonlinear', 'policy-linear', 'policy-nonlinear', 'task-linear', 'task-nonlinear'))\n\n for run in range(args.nRuns):\n for m in [100, 200, 300, 500, 1000, 3000, 5000, 10000]:\n\n with open(results_file, 'a') as f:\n f.write('\\n{},'.format(m))\n\n # Generate data based on true params\n try:\n X, Y = gen_data(m, params, Theta_true_lin, Theta_true_sq)\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, 'gen', \n results_file, newline=True)\n\n # MLE with linear softmax regression\n try:\n Theta_est = mle.linear_softmax_reg(X, Y, params)\n f_eval_mle, z_buy, f_opt = \\\n mle.newsvendor_eval(X_test, Y_test, Theta_est, \n np.zeros((params['n'], len(params['d']))),\n params)\n mle_score = np.mean(f_eval_mle)\n\n print(mle_score)\n with open(results_file, 'a') as f:\n f.write('{},'.format(mle_score))\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, 'mle-linear', results_file)\n\n\n # Nonlinear MLE net\n try:\n mle_nonlin_score = mle_net.run_mle_net(\n X, Y, X_test, Y_test, params)\n\n print(mle_nonlin_score)\n with open(results_file, 'a') as f:\n f.write('{},'.format(mle_nonlin_score))\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, \n 'mle-nonlinear', results_file)\n\n\n # Pure end-to-end policy neural net (linear)\n try:\n policy_lin_score = policy_net.run_policy_net(\n X, Y, X_test, Y_test, params)\n\n print(policy_lin_score)\n with open(results_file, 'a') as f:\n 
f.write('{},'.format(policy_lin_score))\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, 'policy-linear', results_file)\n\n\n # Pure end-to-end policy neural net (nonlinear)\n try:\n policy_nonlin_score = policy_net.run_policy_net(\n X, Y, X_test, Y_test, params, is_nonlinear=True)\n\n print(policy_nonlin_score)\n with open(results_file, 'a') as f:\n f.write('{},'.format(policy_nonlin_score))\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, 'policy-nonlinear', results_file)\n\n\n # Model-based end-to-end model (linear)\n try:\n e2e_lin_score = task_net.run_task_net(\n X, Y, X_test, Y_test, params)\n\n print(e2e_lin_score)\n with open(results_file, 'a') as f:\n f.write('{},'.format(e2e_lin_score))\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, \n 'task-linear', results_file)\n\n # Model-based end-to-end model (nonlinear)\n try:\n e2e_nonlin_score = task_net.run_task_net(\n X, Y, X_test, Y_test, params, is_nonlinear=True)\n\n print(e2e_nonlin_score)\n with open(results_file, 'a') as f:\n f.write('{}\\n'.format(e2e_nonlin_score))\n except Exception as e:\n log_error_and_write(e, save_folder, m, run, results_file, \n 'task-nonlinear', newline=True)\n\n # Plot results as we go\n try:\n plot.plot_results(save_folder, true_model)\n except Exception as e:\n with open(os.path.join(save_folder, \n 'errors.log'), 'a') as f:\n f.write('{}: m {}, model {}, run {}: {}\\n'.format(\n datetime.now(), m, 'plot', run, e))\n \n\n\ndef init_newsvendor_params():\n params = {}\n\n # Ordering costs\n params['c_lin'] = 10\n params['c_quad'] = 2.0\n\n # Over-order penalties\n params['b_lin'] = 30\n params['b_quad'] = 14\n\n # Under-order penalties\n params['h_lin'] = 10\n params['h_quad'] = 2\n\n # Discrete demands\n params['d'] = np.array([1, 2, 5, 10, 20]).astype(np.float32)\n\n # Number of features\n params['n'] = 20\n\n return params\n\n\ndef init_theta_true(params, is_linear=True, with_seed=False):\n if is_linear:\n # Linear true model (py ∝ exp(θX))\n np.random.seed(42) if with_seed else np.random.seed(None)\n Theta_true_lin = np.random.randn(params['n'], len(params['d']))\n Theta_true_sq = np.zeros((params['n'], len(params['d'])))\n else:\n # Squared true model (py ∝ exp((θX)^2))\n Theta_true_lin = np.zeros((params['n'], len(params['d'])))\n np.random.seed(42) if with_seed else np.random.seed(None)\n Theta_true_sq = np.random.randn(params['n'], len(params['d']))\n\n np.random.seed(None)\n\n return Theta_true_lin, Theta_true_sq\n\n\ndef gen_data(m, params, Theta_true_lin, Theta_true_sq, with_seed=False):\n np.random.seed(0) if with_seed else np.random.seed(None)\n X = np.random.randn(m, params['n'])\n\n PY = np.exp(X.dot(Theta_true_lin) + (X.dot(Theta_true_sq)) ** 2)\n PY = PY / np.sum(PY, axis=1)[:, None]\n\n # Generate demand realizations\n Y = np.where(np.cumsum(np.random.rand(m)[:, None]\n < np.cumsum(PY, axis=1), axis=1) == 1)[1]\n Y = np.eye(len(params['d']))[Y, :]\n\n np.random.seed(None)\n\n return X, Y\n\n\ndef log_error_and_write(e, save_folder, m, run, model, results_file, newline=False):\n with open(os.path.join(save_folder, 'errors.log'), 'a') as f:\n f.write('{}: m {}, model {}, run {}: {}\\n'.format(\n datetime.now(), m, model, run, e))\n with open(results_file, 'a') as f:\n f.write('\\n' if newline else ',')\n\n\nif __name__=='__main__':\n main()\n"
] |
[
[
"numpy.random.seed",
"numpy.cumsum",
"numpy.random.randn",
"numpy.mean",
"numpy.random.rand",
"numpy.array",
"numpy.sum"
]
] |
BishmoyPaul/lama
|
[
"c7f5af9c167a15e2b0b741b1419237de52c4af05"
] |
[
"saicinpainting/training/modules/pix2pixhd.py"
] |
[
"# original: https://github.com/NVIDIA/pix2pixHD/blob/master/models/networks.py\nimport collections\nfrom functools import partial\nimport functools\nimport logging\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch.nn as nn\n\nfrom saicinpainting.training.modules.base import BaseDiscriminator, deconv_factory, get_conv_block_ctor, get_norm_layer, get_activation\nfrom saicinpainting.training.modules.ffc import FFCResnetBlock\nfrom saicinpainting.training.modules.multidilated_conv import MultidilatedConv\n\nclass DotDict(defaultdict):\n # https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n __getattr__ = defaultdict.get\n __setattr__ = defaultdict.__setitem__\n __delattr__ = defaultdict.__delitem__\n\nclass Identity(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x\n\n\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default',\n dilation=1, in_dim=None, groups=1, second_dilation=None):\n super(ResnetBlock, self).__init__()\n self.in_dim = in_dim\n self.dim = dim\n if second_dilation is None:\n second_dilation = dilation\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout,\n conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups,\n second_dilation=second_dilation)\n\n if self.in_dim is not None:\n self.input_conv = nn.Conv2d(in_dim, dim, 1)\n\n self.out_channnels = dim\n\n def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default',\n dilation=1, in_dim=None, groups=1, second_dilation=1):\n conv_layer = get_conv_block_ctor(conv_kind)\n\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(dilation)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(dilation)]\n elif padding_type == 'zero':\n p = dilation\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n if in_dim is None:\n in_dim = dim\n\n conv_block += [conv_layer(in_dim, dim, kernel_size=3, padding=p, dilation=dilation),\n norm_layer(dim),\n activation]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(second_dilation)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(second_dilation)]\n elif padding_type == 'zero':\n p = second_dilation\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [conv_layer(dim, dim, kernel_size=3, padding=p, dilation=second_dilation, groups=groups),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n x_before = x\n if self.in_dim is not None:\n x = self.input_conv(x)\n out = x + self.conv_block(x_before)\n return out\n\nclass ResnetBlock5x5(nn.Module):\n def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default',\n dilation=1, in_dim=None, groups=1, second_dilation=None):\n super(ResnetBlock5x5, self).__init__()\n self.in_dim = in_dim\n self.dim = dim\n if second_dilation is None:\n second_dilation = dilation\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout,\n conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups,\n second_dilation=second_dilation)\n\n if 
self.in_dim is not None:\n self.input_conv = nn.Conv2d(in_dim, dim, 1)\n\n self.out_channnels = dim\n\n def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default',\n dilation=1, in_dim=None, groups=1, second_dilation=1):\n conv_layer = get_conv_block_ctor(conv_kind)\n\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(dilation * 2)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(dilation * 2)]\n elif padding_type == 'zero':\n p = dilation * 2\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n if in_dim is None:\n in_dim = dim\n\n conv_block += [conv_layer(in_dim, dim, kernel_size=5, padding=p, dilation=dilation),\n norm_layer(dim),\n activation]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(second_dilation * 2)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(second_dilation * 2)]\n elif padding_type == 'zero':\n p = second_dilation * 2\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [conv_layer(dim, dim, kernel_size=5, padding=p, dilation=second_dilation, groups=groups),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n x_before = x\n if self.in_dim is not None:\n x = self.input_conv(x)\n out = x + self.conv_block(x_before)\n return out\n\n\nclass MultidilatedResnetBlock(nn.Module):\n def __init__(self, dim, padding_type, conv_layer, norm_layer, activation=nn.ReLU(True), use_dropout=False):\n super().__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, conv_layer, norm_layer, activation, use_dropout)\n\n def build_conv_block(self, dim, padding_type, conv_layer, norm_layer, activation, use_dropout, dilation=1):\n conv_block = []\n conv_block += [conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),\n norm_layer(dim),\n activation]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n conv_block += [conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),\n norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n out = x + self.conv_block(x)\n return out\n\n\nclass MultiDilatedGlobalGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3,\n n_blocks=3, norm_layer=nn.BatchNorm2d,\n padding_type='reflect', conv_kind='default',\n deconv_kind='convtranspose', activation=nn.ReLU(True),\n up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),\n add_out_act=True, max_features=1024, multidilation_kwargs={},\n ffc_positions=None, ffc_kwargs={}):\n assert (n_blocks >= 0)\n super().__init__()\n\n conv_layer = get_conv_block_ctor(conv_kind)\n resnet_conv_layer = functools.partial(get_conv_block_ctor('multidilated'), **multidilation_kwargs)\n norm_layer = get_norm_layer(norm_layer)\n if affine is not None:\n norm_layer = partial(norm_layer, affine=affine)\n up_norm_layer = get_norm_layer(up_norm_layer)\n if affine is not None:\n up_norm_layer = partial(up_norm_layer, affine=affine)\n\n model = [nn.ReflectionPad2d(3),\n conv_layer(input_nc, ngf, kernel_size=7, padding=0),\n norm_layer(ngf),\n activation]\n\n identity = Identity()\n ### downsample\n for i in range(n_downsampling):\n mult = 2 ** i\n\n model += [conv_layer(min(max_features, ngf * mult),\n min(max_features, ngf * mult * 2),\n kernel_size=3, stride=2, padding=1),\n 
norm_layer(min(max_features, ngf * mult * 2)),\n activation]\n\n mult = 2 ** n_downsampling\n feats_num_bottleneck = min(max_features, ngf * mult)\n\n ### resnet blocks\n for i in range(n_blocks):\n if ffc_positions is not None and i in ffc_positions:\n model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU,\n inline=True, **ffc_kwargs)]\n model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type,\n conv_layer=resnet_conv_layer, activation=activation,\n norm_layer=norm_layer)]\n\n ### upsample\n for i in range(n_downsampling):\n mult = 2 ** (n_downsampling - i)\n model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features)\n model += [nn.ReflectionPad2d(3),\n nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n if add_out_act:\n model.append(get_activation('tanh' if add_out_act is True else add_out_act))\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n return self.model(input)\n\nclass ConfigGlobalGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3,\n n_blocks=3, norm_layer=nn.BatchNorm2d,\n padding_type='reflect', conv_kind='default',\n deconv_kind='convtranspose', activation=nn.ReLU(True),\n up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),\n add_out_act=True, max_features=1024,\n manual_block_spec=[],\n resnet_block_kind='multidilatedresnetblock',\n resnet_conv_kind='multidilated',\n resnet_dilation=1,\n multidilation_kwargs={}):\n assert (n_blocks >= 0)\n super().__init__()\n\n conv_layer = get_conv_block_ctor(conv_kind)\n resnet_conv_layer = functools.partial(get_conv_block_ctor(resnet_conv_kind), **multidilation_kwargs)\n norm_layer = get_norm_layer(norm_layer)\n if affine is not None:\n norm_layer = partial(norm_layer, affine=affine)\n up_norm_layer = get_norm_layer(up_norm_layer)\n if affine is not None:\n up_norm_layer = partial(up_norm_layer, affine=affine)\n\n model = [nn.ReflectionPad2d(3),\n conv_layer(input_nc, ngf, kernel_size=7, padding=0),\n norm_layer(ngf),\n activation]\n\n identity = Identity()\n\n ### downsample\n for i in range(n_downsampling):\n mult = 2 ** i\n model += [conv_layer(min(max_features, ngf * mult),\n min(max_features, ngf * mult * 2),\n kernel_size=3, stride=2, padding=1),\n norm_layer(min(max_features, ngf * mult * 2)),\n activation]\n\n mult = 2 ** n_downsampling\n feats_num_bottleneck = min(max_features, ngf * mult)\n\n if len(manual_block_spec) == 0:\n manual_block_spec = [\n DotDict(lambda : None, {\n 'n_blocks': n_blocks,\n 'use_default': True})\n ]\n\n ### resnet blocks\n for block_spec in manual_block_spec:\n def make_and_add_blocks(model, block_spec):\n block_spec = DotDict(lambda : None, block_spec)\n if not block_spec.use_default:\n resnet_conv_layer = functools.partial(get_conv_block_ctor(block_spec.resnet_conv_kind), **block_spec.multidilation_kwargs)\n resnet_conv_kind = block_spec.resnet_conv_kind\n resnet_block_kind = block_spec.resnet_block_kind\n if block_spec.resnet_dilation is not None:\n resnet_dilation = block_spec.resnet_dilation\n for i in range(block_spec.n_blocks):\n if resnet_block_kind == \"multidilatedresnetblock\":\n model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type,\n conv_layer=resnet_conv_layer, activation=activation,\n norm_layer=norm_layer)]\n if resnet_block_kind == \"resnetblock\": \n model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,\n 
conv_kind=resnet_conv_kind)]\n if resnet_block_kind == \"resnetblock5x5\": \n model += [ResnetBlock5x5(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,\n conv_kind=resnet_conv_kind)]\n if resnet_block_kind == \"resnetblockdwdil\":\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,\n conv_kind=resnet_conv_kind, dilation=resnet_dilation, second_dilation=resnet_dilation)]\n make_and_add_blocks(model, block_spec)\n \n ### upsample\n for i in range(n_downsampling):\n mult = 2 ** (n_downsampling - i)\n model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features)\n model += [nn.ReflectionPad2d(3),\n nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n if add_out_act:\n model.append(get_activation('tanh' if add_out_act is True else add_out_act))\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n return self.model(input)\n\n\ndef make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs):\n blocks = []\n for i in range(dilated_blocks_n):\n if dilation_block_kind == 'simple':\n blocks.append(ResnetBlock(**dilated_block_kwargs, dilation=2 ** (i + 1)))\n elif dilation_block_kind == 'multi':\n blocks.append(MultidilatedResnetBlock(**dilated_block_kwargs))\n else:\n raise ValueError(f'dilation_block_kind could not be \"{dilation_block_kind}\"')\n return blocks\n\n\nclass GlobalGenerator(nn.Module):\n def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,\n padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),\n up_norm_layer=nn.BatchNorm2d, affine=None,\n up_activation=nn.ReLU(True), dilated_blocks_n=0, dilated_blocks_n_start=0,\n dilated_blocks_n_middle=0,\n add_out_act=True,\n max_features=1024, is_resblock_depthwise=False,\n ffc_positions=None, ffc_kwargs={}, dilation=1, second_dilation=None,\n dilation_block_kind='simple', multidilation_kwargs={}):\n assert (n_blocks >= 0)\n super().__init__()\n\n conv_layer = get_conv_block_ctor(conv_kind)\n norm_layer = get_norm_layer(norm_layer)\n if affine is not None:\n norm_layer = partial(norm_layer, affine=affine)\n up_norm_layer = get_norm_layer(up_norm_layer)\n if affine is not None:\n up_norm_layer = partial(up_norm_layer, affine=affine)\n\n if ffc_positions is not None:\n ffc_positions = collections.Counter(ffc_positions)\n\n model = [nn.ReflectionPad2d(3),\n conv_layer(input_nc, ngf, kernel_size=7, padding=0),\n norm_layer(ngf),\n activation]\n\n identity = Identity()\n ### downsample\n for i in range(n_downsampling):\n mult = 2 ** i\n\n model += [conv_layer(min(max_features, ngf * mult),\n min(max_features, ngf * mult * 2),\n kernel_size=3, stride=2, padding=1),\n norm_layer(min(max_features, ngf * mult * 2)),\n activation]\n\n mult = 2 ** n_downsampling\n feats_num_bottleneck = min(max_features, ngf * mult)\n\n dilated_block_kwargs = dict(dim=feats_num_bottleneck, padding_type=padding_type,\n activation=activation, norm_layer=norm_layer)\n if dilation_block_kind == 'simple':\n dilated_block_kwargs['conv_kind'] = conv_kind\n elif dilation_block_kind == 'multi':\n dilated_block_kwargs['conv_layer'] = functools.partial(\n get_conv_block_ctor('multidilated'), **multidilation_kwargs)\n\n # dilated blocks at the start of the bottleneck sausage\n if dilated_blocks_n_start is not None and dilated_blocks_n_start > 0:\n model += make_dil_blocks(dilated_blocks_n_start, dilation_block_kind, dilated_block_kwargs)\n\n # resnet blocks\n for 
i in range(n_blocks):\n # dilated blocks at the middle of the bottleneck sausage\n if i == n_blocks // 2 and dilated_blocks_n_middle is not None and dilated_blocks_n_middle > 0:\n model += make_dil_blocks(dilated_blocks_n_middle, dilation_block_kind, dilated_block_kwargs)\n \n if ffc_positions is not None and i in ffc_positions:\n for _ in range(ffc_positions[i]): # same position can occur more than once\n model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU,\n inline=True, **ffc_kwargs)]\n\n if is_resblock_depthwise:\n resblock_groups = feats_num_bottleneck\n else:\n resblock_groups = 1\n\n model += [ResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation=activation,\n norm_layer=norm_layer, conv_kind=conv_kind, groups=resblock_groups,\n dilation=dilation, second_dilation=second_dilation)]\n \n\n # dilated blocks at the end of the bottleneck sausage\n if dilated_blocks_n is not None and dilated_blocks_n > 0:\n model += make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs)\n\n # upsample\n for i in range(n_downsampling):\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(min(max_features, ngf * mult),\n min(max_features, int(ngf * mult / 2)),\n kernel_size=3, stride=2, padding=1, output_padding=1),\n up_norm_layer(min(max_features, int(ngf * mult / 2))),\n up_activation]\n model += [nn.ReflectionPad2d(3),\n nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n if add_out_act:\n model.append(get_activation('tanh' if add_out_act is True else add_out_act))\n self.model = nn.Sequential(*model)\n\n def forward(self, input):\n return self.model(input)\n\n\nclass GlobalGeneratorGated(GlobalGenerator):\n def __init__(self, *args, **kwargs):\n real_kwargs=dict(\n conv_kind='gated_bn_relu',\n activation=nn.Identity(),\n norm_layer=nn.Identity\n )\n real_kwargs.update(kwargs)\n super().__init__(*args, **real_kwargs)\n\n\nclass GlobalGeneratorFromSuperChannels(nn.Module):\n def __init__(self, input_nc, output_nc, n_downsampling, n_blocks, super_channels, norm_layer=\"bn\", padding_type='reflect', add_out_act=True):\n super().__init__()\n self.n_downsampling = n_downsampling\n norm_layer = get_norm_layer(norm_layer)\n if type(norm_layer) == functools.partial:\n use_bias = (norm_layer.func == nn.InstanceNorm2d)\n else:\n use_bias = (norm_layer == nn.InstanceNorm2d)\n\n channels = self.convert_super_channels(super_channels)\n self.channels = channels\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, channels[0], kernel_size=7, padding=0, bias=use_bias),\n norm_layer(channels[0]),\n nn.ReLU(True)]\n\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n model += [nn.Conv2d(channels[0+i], channels[1+i], kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(channels[1+i]),\n nn.ReLU(True)]\n\n mult = 2 ** n_downsampling\n\n n_blocks1 = n_blocks // 3\n n_blocks2 = n_blocks1\n n_blocks3 = n_blocks - n_blocks1 - n_blocks2\n\n for i in range(n_blocks1):\n c = n_downsampling\n dim = channels[c]\n model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer)]\n\n for i in range(n_blocks2):\n c = n_downsampling+1\n dim = channels[c]\n kwargs = {}\n if i == 0:\n kwargs = {\"in_dim\": channels[c-1]}\n model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)]\n\n for i in range(n_blocks3):\n c = n_downsampling+2\n dim = channels[c]\n kwargs = {}\n if i == 0:\n kwargs = {\"in_dim\": channels[c-1]}\n model += [ResnetBlock(dim, 
padding_type=padding_type, norm_layer=norm_layer, **kwargs)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n model += [nn.ConvTranspose2d(channels[n_downsampling+3+i],\n channels[n_downsampling+3+i+1],\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(channels[n_downsampling+3+i+1]),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(channels[2*n_downsampling+3], output_nc, kernel_size=7, padding=0)]\n\n if add_out_act:\n model.append(get_activation('tanh' if add_out_act is True else add_out_act))\n self.model = nn.Sequential(*model)\n\n def convert_super_channels(self, super_channels):\n n_downsampling = self.n_downsampling\n result = []\n cnt = 0\n\n if n_downsampling == 2:\n N1 = 10\n elif n_downsampling == 3:\n N1 = 13\n else:\n raise NotImplementedError\n\n for i in range(0, N1):\n if i in [1,4,7,10]:\n channel = super_channels[cnt] * (2 ** cnt)\n config = {'channel': channel}\n result.append(channel)\n logging.info(f\"Downsample channels {result[-1]}\")\n cnt += 1\n\n for i in range(3):\n for counter, j in enumerate(range(N1 + i * 3, N1 + 3 + i * 3)):\n if len(super_channels) == 6:\n channel = super_channels[3] * 4\n else:\n channel = super_channels[i + 3] * 4\n config = {'channel': channel}\n if counter == 0:\n result.append(channel)\n logging.info(f\"Bottleneck channels {result[-1]}\")\n cnt = 2\n\n for i in range(N1+9, N1+21):\n if i in [22, 25,28]:\n cnt -= 1\n if len(super_channels) == 6:\n channel = super_channels[5 - cnt] * (2 ** cnt)\n else:\n channel = super_channels[7 - cnt] * (2 ** cnt)\n result.append(int(channel))\n logging.info(f\"Upsample channels {result[-1]}\")\n return result\n\n def forward(self, input):\n return self.model(input)\n\n\n# Defines the PatchGAN discriminator with the specified arguments.\nclass NLayerDiscriminator(BaseDiscriminator):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,):\n super().__init__()\n self.n_layers = n_layers\n\n kw = 4\n padw = int(np.ceil((kw-1.0)/2))\n sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)]]\n\n nf = ndf\n for n in range(1, n_layers):\n nf_prev = nf\n nf = min(nf * 2, 512)\n\n cur_model = []\n cur_model += [\n nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),\n norm_layer(nf),\n nn.LeakyReLU(0.2, True)\n ]\n sequence.append(cur_model)\n\n nf_prev = nf\n nf = min(nf * 2, 512)\n\n cur_model = []\n cur_model += [\n nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),\n norm_layer(nf),\n nn.LeakyReLU(0.2, True)\n ]\n sequence.append(cur_model)\n\n sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]\n\n for n in range(len(sequence)):\n setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))\n\n def get_all_activations(self, x):\n res = [x]\n for n in range(self.n_layers + 2):\n model = getattr(self, 'model' + str(n))\n res.append(model(res[-1]))\n return res[1:]\n\n def forward(self, x):\n act = self.get_all_activations(x)\n return act[-1], act[:-1]\n\n\nclass MultidilatedNLayerDiscriminator(BaseDiscriminator):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, multidilation_kwargs={}):\n super().__init__()\n self.n_layers = n_layers\n\n kw = 4\n padw = int(np.ceil((kw-1.0)/2))\n sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)]]\n\n nf = ndf\n for n in range(1, n_layers):\n nf_prev = nf\n nf = 
min(nf * 2, 512)\n\n cur_model = []\n cur_model += [\n MultidilatedConv(nf_prev, nf, kernel_size=kw, stride=2, padding=[2, 3], **multidilation_kwargs),\n norm_layer(nf),\n nn.LeakyReLU(0.2, True)\n ]\n sequence.append(cur_model)\n\n nf_prev = nf\n nf = min(nf * 2, 512)\n\n cur_model = []\n cur_model += [\n nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),\n norm_layer(nf),\n nn.LeakyReLU(0.2, True)\n ]\n sequence.append(cur_model)\n\n sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]\n\n for n in range(len(sequence)):\n setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))\n\n def get_all_activations(self, x):\n res = [x]\n for n in range(self.n_layers + 2):\n model = getattr(self, 'model' + str(n))\n res.append(model(res[-1]))\n return res[1:]\n\n def forward(self, x):\n act = self.get_all_activations(x)\n return act[-1], act[:-1]\n\n\nclass NLayerDiscriminatorAsGen(NLayerDiscriminator):\n def forward(self, x):\n return super().forward(x)[0]\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.ReflectionPad2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"numpy.ceil",
"torch.nn.Identity",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.ReplicationPad2d"
]
] |
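The row above pairs a ResNet-style generator and PatchGAN discriminators with the `torch.nn` calls they rely on. Below is a minimal, hedged sketch of those calls in isolation; layer sizes and the input shape are arbitrary and are not taken from the repository.

```python
import numpy as np
import torch
import torch.nn as nn

# Same padding rule as the NLayerDiscriminator above: kw=4 -> padw=2.
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))

block = nn.Sequential(
    nn.ReflectionPad2d(1),                       # mirror-pad the border
    nn.Conv2d(3, 8, kernel_size=kw, stride=2, padding=padw),
    nn.LeakyReLU(0.2, True),
    nn.Dropout(0.5),
    nn.ReplicationPad2d(1),                      # edge-replicate padding variant
    nn.ConvTranspose2d(8, 3, kernel_size=3, stride=2, padding=1, output_padding=1),
    nn.ReLU(True),
    nn.Identity(),                               # no-op placeholder layer
)
print(block(torch.randn(1, 3, 32, 32)).shape)
```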
alievilya/nas-fedot
|
[
"ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea",
"ce1b07505cd189f3097f1cfa6c38cb4f0d56ccea"
] |
[
"fedot/benchmark/experiments/viz.py",
"fedot/test/test_regression.py"
] |
[
"from itertools import cycle\r\n\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\n\r\ndef fitness_by_generations_boxplots(history_runs, iterations):\r\n iters = [it for it in range(iterations)]\r\n fitness_by_iter = []\r\n for it in iters:\r\n fitness_values = []\r\n for history in history_runs:\r\n value = history[it]\r\n fitness_values.append(value)\r\n fitness_by_iter.append(fitness_values)\r\n\r\n sns.boxplot(iters, fitness_by_iter, color=\"seagreen\")\r\n\r\n plt.title('Fitness history by generations')\r\n plt.ylabel('Fitness')\r\n plt.xlabel('Generation, #')\r\n plt.show()\r\n\r\n\r\ndef show_fitness_history_all(history_runs, iterations, with_bands=False):\r\n color_to_take = cycle('bgrcmykw')\r\n iters = [it for it in range(iterations)]\r\n\r\n if not with_bands:\r\n for history in history_runs:\r\n sns.tsplot(history, iters, legend=True, color=next(color_to_take))\r\n plt.legend(labels=[idx for idx in range(len(history_runs))], loc='lower right')\r\n else:\r\n sns.tsplot(history_runs, iters, legend=True, color=next(color_to_take))\r\n plt.ylabel('Fitness')\r\n plt.xlabel('Iteration, #')\r\n plt.show()\r\n\r\n\r\ndef show_history_optimization_comparison(first, second, third, fourth,\r\n iterations,\r\n label_first, label_second, label_third, label_fourth):\r\n color_to_take = cycle('bgrcmykw')\r\n # iters = [it for it in range(iterations_first)]\r\n plt.yticks(fontsize=12)\r\n sns.tsplot(first, iterations, legend=True, color=next(color_to_take))\r\n sns.tsplot(second, iterations, legend=True, color=next(color_to_take))\r\n sns.tsplot(third, iterations, legend=True, color=next(color_to_take))\r\n sns.tsplot(fourth, iterations, legend=True, color=next(color_to_take))\r\n plt.xticks(range(0, max(iterations)),fontsize=13)\r\n\r\n plt.legend(labels=[label_first, label_second, label_third, label_fourth],loc='lower right',fontsize=12)\r\n plt.ylabel('Best fitness',fontsize=13)\r\n plt.xlabel('Iteration, #', fontsize = 13)\r\n plt.tight_layout()\r\n plt.show()",
"import numpy as np\r\nfrom sklearn.datasets import make_regression\r\nfrom sklearn.metrics import mean_squared_error as mse\r\n\r\nfrom core.composer.chain import Chain\r\nfrom core.composer.composer import DummyComposer, DummyChainTypeEnum, ComposerRequirements\r\nfrom core.models.data import InputData, train_test_data_setup\r\nfrom core.repository.model_types_repository import ModelTypesIdsEnum\r\nfrom core.repository.quality_metrics_repository import MetricsRepository, RegressionMetricsEnum\r\nfrom core.repository.task_types import MachineLearningTasksEnum\r\n\r\n\r\ndef compose_chain(data: InputData) -> Chain:\r\n dummy_composer = DummyComposer(DummyChainTypeEnum.hierarchical)\r\n composer_requirements = ComposerRequirements(primary=[ModelTypesIdsEnum.lasso, ModelTypesIdsEnum.ridge],\r\n secondary=[ModelTypesIdsEnum.linear])\r\n\r\n metric_function = MetricsRepository().metric_by_id(RegressionMetricsEnum.RMSE)\r\n\r\n chain = dummy_composer.compose_chain(data=data,\r\n initial_chain=None,\r\n composer_requirements=composer_requirements,\r\n metrics=metric_function, is_visualise=False)\r\n return chain\r\n\r\n\r\ndef get_synthetic_regression_data(n_samples=10000, n_features=10, random_state=None) -> InputData:\r\n synthetic_data = make_regression(n_samples=n_samples, n_features=n_features, random_state=random_state)\r\n input_data = InputData(idx=np.arange(0, len(synthetic_data[1])),\r\n features=synthetic_data[0],\r\n target=synthetic_data[1],\r\n task_type=MachineLearningTasksEnum.regression)\r\n return input_data\r\n\r\n\r\ndef get_rmse_value(chain: Chain, train_data: InputData, test_data: InputData) -> (float, float):\r\n train_pred = chain.predict(input_data=train_data)\r\n test_pred = chain.predict(input_data=test_data)\r\n rmse_value_test = mse(y_true=test_data.target, y_pred=test_pred.predict, squared=False)\r\n rmse_value_train = mse(y_true=train_data.target, y_pred=train_pred.predict, squared=False)\r\n\r\n return rmse_value_train, rmse_value_test\r\n\r\n\r\ndef test_regression_chain_fit_correct():\r\n data = get_synthetic_regression_data()\r\n\r\n chain = compose_chain(data=data)\r\n train_data, test_data = train_test_data_setup(data)\r\n\r\n chain.fit(input_data=train_data)\r\n _, rmse_on_test = get_rmse_value(chain, train_data, test_data)\r\n\r\n rmse_threshold = np.std(data.target) * 0.05\r\n assert rmse_on_test < rmse_threshold\r\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"sklearn.datasets.make_regression",
"numpy.std",
"sklearn.metrics.mean_squared_error"
]
] |
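A hedged, self-contained sketch of the matplotlib/sklearn/numpy calls indexed for this row, run on synthetic data rather than the FEDOT experiment results; the plotted quantities are illustrative only.

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.metrics import mean_squared_error

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
rmse = mean_squared_error(y, np.zeros_like(y)) ** 0.5   # RMSE of a trivial zero predictor
print("baseline RMSE:", rmse, "| 5% of target std:", 0.05 * np.std(y))

plt.plot(sorted(y), label="sorted targets")
plt.title("Synthetic regression targets")
plt.xlabel("Sample #")
plt.ylabel("Target value")
plt.yticks(fontsize=12)
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
```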
Dom1L/schnetpack
|
[
"0d5366fc49c20bca48f6f491fc7f1e283f324bc6"
] |
[
"src/schnetpack/data/atoms.py"
] |
[
"\"\"\"\nThis module contains all functionalities required to load atomistic data,\ngenerate batches and compute statistics. It makes use of the ASE database\nfor atoms [#ase2]_.\n\nReferences\n----------\n.. [#ase2] Larsen, Mortensen, Blomqvist, Castelli, Christensen, Dułak, Friis,\n Groves, Hammer, Hargus:\n The atomic simulation environment -- a Python library for working with atoms.\n Journal of Physics: Condensed Matter, 9, 27. 2017.\n\"\"\"\n\nimport logging\nimport os\nimport warnings\nfrom base64 import b64encode, b64decode\n\nimport numpy as np\nimport torch\nfrom ase.db import connect\nfrom torch.utils.data import Dataset\n\nfrom schnetpack.environment import SimpleEnvironmentProvider, collect_atom_triples\nfrom .definitions import Structure\nfrom .partitioning import train_test_split\n\nlogger = logging.getLogger(__name__)\n\n\nclass AtomsDataError(Exception):\n pass\n\n\nclass AtomsData(Dataset):\n ENCODING = \"utf-8\"\n available_properties = None\n\n def __init__(\n self,\n dbpath,\n subset=None,\n required_properties=[],\n environment_provider=SimpleEnvironmentProvider(),\n collect_triples=False,\n center_positions=True,\n load_charge=False,\n ):\n self.dbpath = dbpath\n self.subset = subset\n self.required_properties = required_properties\n if required_properties is None:\n self.required_properties = self.available_properties\n self.environment_provider = environment_provider\n self.collect_triples = collect_triples\n self.centered = center_positions\n self.load_charge = load_charge\n\n def create_splits(self, num_train=None, num_val=None, split_file=None):\n warnings.warn(\n \"create_splits is deprecated, \"\n + \"use schnetpack.data.train_test_split instead\",\n DeprecationWarning,\n )\n return train_test_split(self, num_train, num_val, split_file)\n\n def create_subset(self, idx):\n \"\"\"\n Returns a new dataset that only consists of provided indices.\n Args:\n idx (numpy.ndarray): subset indices\n\n Returns:\n schnetpack.data.AtomsData: dataset with subset of original data\n \"\"\"\n idx = np.array(idx)\n subidx = (\n idx if self.subset is None or len(idx) == 0 else np.array(self.subset)[idx]\n )\n return type(self)(\n self.dbpath,\n subidx,\n self.required_properties,\n self.environment_provider,\n self.collect_triples,\n self.centered,\n self.load_charge,\n )\n\n def __len__(self):\n if self.subset is None:\n with connect(self.dbpath) as conn:\n return conn.count()\n return len(self.subset)\n\n def __getitem__(self, idx):\n at, properties = self.get_properties(idx)\n\n # get atom environment\n nbh_idx, offsets = self.environment_provider.get_environment(at)\n\n properties[Structure.neighbors] = torch.LongTensor(nbh_idx.astype(np.int))\n properties[Structure.cell_offset] = torch.FloatTensor(\n offsets.astype(np.float32)\n )\n properties[\"_idx\"] = torch.LongTensor(np.array([idx], dtype=np.int))\n\n if self.collect_triples:\n nbh_idx_j, nbh_idx_k = collect_atom_triples(nbh_idx)\n properties[Structure.neighbor_pairs_j] = torch.LongTensor(\n nbh_idx_j.astype(np.int)\n )\n properties[Structure.neighbor_pairs_k] = torch.LongTensor(\n nbh_idx_k.astype(np.int)\n )\n\n return properties\n\n def _subset_index(self, idx):\n # get row\n if self.subset is None:\n idx = int(idx)\n else:\n idx = int(self.subset[idx])\n return idx\n\n def get_atoms(self, idx):\n \"\"\"\n Return atoms of provided index.\n\n Args:\n idx (int): atoms index\n\n Returns:\n ase.Atoms: atoms data\n\n \"\"\"\n idx = self._subset_index(idx)\n with connect(self.dbpath) as conn:\n row = conn.get(idx + 1)\n at 
= row.toatoms()\n return at\n\n def get_metadata(self, key):\n with connect(self.dbpath) as conn:\n if key in conn.metadata.keys():\n return conn.metadata[key]\n return None\n\n def set_metadata(self, metadata):\n with connect(self.dbpath) as conn:\n conn.metadata = metadata\n\n def _add_system(self, conn, atoms, **properties):\n\n data = {}\n\n props = (\n properties.keys()\n if self.available_properties is None\n else self.available_properties\n )\n\n for pname in props:\n try:\n prop = properties[pname]\n except:\n raise AtomsDataError(\"Required property missing:\" + pname)\n\n try:\n pshape = prop.shape\n ptype = prop.dtype\n except:\n raise AtomsDataError(\n \"Required property `\" + pname + \"` has to be `numpy.ndarray`.\"\n )\n\n base64_bytes = b64encode(prop.tobytes())\n base64_string = base64_bytes.decode(AtomsData.ENCODING)\n data[pname] = base64_string\n data[\"_shape_\" + pname] = pshape\n data[\"_dtype_\" + pname] = str(ptype)\n\n conn.write(atoms, data=data)\n\n def add_system(self, atoms, **properties):\n with connect(self.dbpath) as conn:\n self._add_system(conn, atoms, **properties)\n\n def add_systems(self, atoms, property_list):\n with connect(self.dbpath) as conn:\n for at, prop in zip(atoms, property_list):\n self._add_system(conn, at, **prop)\n\n def get_properties(self, idx):\n idx = self._subset_index(idx)\n with connect(self.dbpath) as conn:\n row = conn.get(idx + 1)\n at = row.toatoms()\n\n # extract properties\n properties = {}\n for pname in self.required_properties:\n # new data format\n try:\n shape = row.data[\"_shape_\" + pname]\n dtype = row.data[\"_dtype_\" + pname]\n prop = np.frombuffer(b64decode(row.data[pname]), dtype=dtype)\n prop = prop.reshape(shape)\n except:\n # fallback for properties stored directly\n # in the row\n if pname in row:\n prop = row[pname]\n else:\n prop = row.data[pname]\n\n try:\n prop.shape\n except AttributeError as e:\n prop = np.array([prop], dtype=np.float32)\n\n properties[pname] = torch.FloatTensor(prop)\n\n if self.load_charge:\n if Structure.charge in row.data.keys():\n shape = row.data[\"_shape_\" + Structure.charge]\n dtype = row.data[\"_dtype_\" + Structure.charge]\n prop = np.frombuffer(b64decode(row.data[Structure.charge]), dtype=dtype)\n prop = prop.reshape(shape)\n properties[Structure.charge] = torch.FloatTensor(prop)\n else:\n properties[Structure.charge] = torch.FloatTensor(\n np.array([0.0], dtype=np.float32)\n )\n\n # extract/calculate structure\n properties[Structure.Z] = torch.LongTensor(at.numbers.astype(np.int))\n positions = at.positions.astype(np.float32)\n if self.centered:\n positions -= at.get_center_of_mass()\n properties[Structure.R] = torch.FloatTensor(positions)\n properties[Structure.cell] = torch.FloatTensor(at.cell.astype(np.float32))\n\n return at, properties\n\n def get_atomref(self, property):\n \"\"\"\n Returns atomref for property.\n\n Args:\n property: property in the qm9 dataset\n\n Returns:\n list: list with atomrefs\n \"\"\"\n labels = self.get_metadata(\"atref_labels\")\n if labels is None:\n return None\n\n col = [i for i, l in enumerate(labels) if l == property]\n assert len(col) <= 1\n\n if len(col) == 1:\n col = col[0]\n atomref = np.array(self.get_metadata(\"atomrefs\"))[:, col : col + 1]\n else:\n atomref = None\n\n return atomref\n\n def get_atomrefs(self, properties):\n \"\"\"\n Return multiple atomrefs as dict.\n\n Args:\n properties (list or str): Desired properties for which the atomrefs are\n calculated.\n\n Returns:\n dict: atomic references\n \"\"\"\n if 
type(properties) is not list:\n properties = [properties]\n return {p: self.get_atomref(p) for p in properties}\n\n\nclass DownloadableAtomsData(AtomsData):\n def __init__(\n self,\n dbpath,\n subset=None,\n required_properties=None,\n environment_provider=SimpleEnvironmentProvider(),\n collect_triples=False,\n center_positions=True,\n load_charge=False,\n download=False,\n ):\n\n super(DownloadableAtomsData, self).__init__(\n dbpath,\n subset,\n required_properties,\n environment_provider,\n collect_triples,\n center_positions,\n load_charge,\n )\n if download:\n self.download()\n\n def download(self):\n \"\"\"\n Wrapper function for the download method.\n \"\"\"\n if os.path.exists(self.dbpath):\n logger.info(\n \"The dataset has already been downloaded and stored \"\n \"at {}\".format(self.dbpath)\n )\n else:\n logger.info(\"Starting download\")\n folder = os.path.dirname(os.path.abspath(self.dbpath))\n if not os.path.exists(folder):\n os.makedirs(folder)\n self._download()\n\n def _download(self):\n \"\"\"\n To be implemented in deriving classes.\n \"\"\"\n raise NotImplementedError\n"
] |
[
[
"numpy.array",
"torch.FloatTensor"
]
] |
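Only two library calls are indexed for this row. A minimal sketch of the conversion pattern they cover in `AtomsData.get_properties`; the property value below is made up.

```python
import numpy as np
import torch

prop = np.array([0.25], dtype=np.float32)   # a scalar property wrapped as an array
tensor = torch.FloatTensor(prop)            # stored in the properties dict as a float tensor
print(tensor, tensor.dtype)                 # tensor([0.2500]) torch.float32
```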
DivJAth/OpticalFlow_MaskRCNN
|
[
"5d93a4a6359f574c02d0ef2e181221303ced2643"
] |
[
"vision/torchvision/models/detection/faster_rcnn.py"
] |
[
"\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n# import torchvision\nfrom torchvision import models\nfrom torchvision.ops import misc as misc_nn_ops\nfrom torchvision.ops import MultiScaleRoIAlign\n\nfrom torchvision.models.utils import load_state_dict_from_url\n\nfrom backbone_utils import resnet_fpn_backbone\n\nfrom generalized_mask_rcnn import GeneralizedRCNN\nfrom rpn import AnchorGenerator, RPNHead, RegionProposalNetwork\nfrom roi_heads import RoIHeads\nfrom transform import GeneralizedRCNNTransform\n\nfrom torchvision import transforms\n\n__all__ = [\n \"FasterRCNN\", \"fasterrcnn_resnet50_fpn\",\n]\n\n\nclass FasterRCNN(GeneralizedRCNN):\n \"\"\"\n Implements Faster R-CNN.\n\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values\n between 0 and H and 0 and W\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses for both the RPN and the R-CNN.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows:\n - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values between\n 0 and H and 0 and W\n - labels (Int64Tensor[N]): the predicted labels for each image\n - scores (Tensor[N]): the scores or each prediction\n\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain a out_channels attribute, which indicates the number of output\n channels that each feature map has (and it should be the same for all feature maps).\n The backbone should return a single Tensor or and OrderedDict[Tensor].\n num_classes (int): number of output classes of the model (including the background).\n If box_predictor is specified, num_classes should be None.\n min_size (int): minimum size of the image to be rescaled before feeding it to the backbone\n max_size (int): maximum size of the image to be rescaled before feeding it to the backbone\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature\n maps.\n rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN\n rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training\n rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing\n rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training\n rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing\n rpn_nms_thresh 
(float): NMS threshold used for postprocessing the RPN proposals\n rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training of the RPN.\n rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be\n considered as negative during training of the RPN.\n rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN\n for computing the loss\n rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training\n of the RPN\n box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes\n box_head (nn.Module): module that takes the cropped feature maps as input\n box_predictor (nn.Module): module that takes the output of box_head and returns the\n classification logits and box regression deltas.\n box_score_thresh (float): during inference, only return proposals with a classification score\n greater than box_score_thresh\n box_nms_thresh (float): NMS threshold for the prediction head. Used during inference\n box_detections_per_img (int): maximum number of detections per image, for all classes.\n box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be\n considered as positive during training of the classification head\n box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be\n considered as negative during training of the classification head\n box_batch_size_per_image (int): number of proposals that are sampled during training of the\n classification head\n box_positive_fraction (float): proportion of positive proposals in a mini-batch during training\n of the classification head\n bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the\n bounding boxes\n\n Example::\n\n >>> import torch\n >>> import torchvision\n >>> from torchvision.models.detection import FasterRCNN\n >>> from torchvision.models.detection.rpn import AnchorGenerator\n >>> # load a pre-trained model for classification and return\n >>> # only the features\n >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n >>> # FasterRCNN needs to know the number of\n >>> # output channels in a backbone. For mobilenet_v2, it's 1280\n >>> # so we need to add it here\n >>> backbone.out_channels = 1280\n >>>\n >>> # let's make the RPN generate 5 x 3 anchors per spatial\n >>> # location, with 5 different sizes and 3 different aspect\n >>> # ratios. We have a Tuple[Tuple[int]] because each feature\n >>> # map could potentially have different sizes and\n >>> # aspect ratios\n >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n >>> aspect_ratios=((0.5, 1.0, 2.0),))\n >>>\n >>> # let's define what are the feature maps that we will\n >>> # use to perform the region of interest cropping, as well as\n >>> # the size of the crop after rescaling.\n >>> # if your backbone returns a Tensor, featmap_names is expected to\n >>> # be [0]. 
More generally, the backbone should return an\n >>> # OrderedDict[Tensor], and in featmap_names you can choose which\n >>> # feature maps to use.\n >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],\n >>> output_size=7,\n >>> sampling_ratio=2)\n >>>\n >>> # put the pieces together inside a FasterRCNN model\n >>> model = FasterRCNN(backbone,\n >>> num_classes=2,\n >>> rpn_anchor_generator=anchor_generator,\n >>> box_roi_pool=roi_pooler)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n \"\"\"\n\n def __init__(self, backbone, num_classes=None,\n # transform parameters\n min_size=800, max_size=1333,\n image_mean=None, image_std=None,\n # RPN parameters\n rpn_anchor_generator=None, rpn_head=None,\n rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,\n rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,\n rpn_nms_thresh=0.7,\n rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,\n # Box parameters\n box_roi_pool=None, box_head=None, box_predictor=None,\n box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n box_batch_size_per_image=512, box_positive_fraction=0.25,\n bbox_reg_weights=None):\n\n if not hasattr(backbone, \"out_channels\"):\n raise ValueError(\n \"backbone should contain an attribute out_channels \"\n \"specifying the number of output channels (assumed to be the \"\n \"same for all the levels)\")\n\n assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None)))\n assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))\n\n if num_classes is not None:\n if box_predictor is not None:\n raise ValueError(\"num_classes should be None when box_predictor is specified\")\n else:\n if box_predictor is None:\n raise ValueError(\"num_classes should not be None when box_predictor \"\n \"is not specified\")\n\n out_channels = backbone.out_channels\n\n if rpn_anchor_generator is None:\n anchor_sizes = ((32,), (64,), (128,), (256,), (512,))\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n rpn_anchor_generator = AnchorGenerator(\n anchor_sizes, aspect_ratios\n )\n if rpn_head is None:\n rpn_head = RPNHead(\n out_channels, rpn_anchor_generator.num_anchors_per_location()[0]\n )\n\n rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)\n rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)\n\n rpn = RegionProposalNetwork(\n rpn_anchor_generator, rpn_head,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh)\n\n if box_roi_pool is None:\n box_roi_pool = MultiScaleRoIAlign(\n featmap_names=['0', '1', '2', '3'],\n output_size=7,\n sampling_ratio=2)\n\n if box_head is None:\n resolution = box_roi_pool.output_size[0]\n representation_size = 1024\n box_head = TwoMLPHead(\n out_channels * resolution ** 2,\n representation_size)\n\n if box_predictor is None:\n representation_size = 1024\n box_predictor = FastRCNNPredictor(\n representation_size,\n num_classes)\n\n roi_heads = RoIHeads(\n # Box\n box_roi_pool, box_head, box_predictor,\n box_fg_iou_thresh, box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights,\n box_score_thresh, box_nms_thresh, box_detections_per_img)\n\n if image_mean is None:\n image_mean = [0.485, 0.456, 0.406]\n if image_std is None:\n image_std 
= [0.229, 0.224, 0.225]\n transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n\n super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)\n\n\nclass TwoMLPHead(nn.Module):\n \"\"\"\n Standard heads for FPN-based models\n\n Arguments:\n in_channels (int): number of input channels\n representation_size (int): size of the intermediate representation\n \"\"\"\n\n def __init__(self, in_channels, representation_size):\n super(TwoMLPHead, self).__init__()\n\n self.fc6 = nn.Linear(in_channels, representation_size)\n self.fc7 = nn.Linear(representation_size, representation_size)\n\n def forward(self, x):\n x = x.flatten(start_dim=1)\n\n x = F.relu(self.fc6(x))\n x = F.relu(self.fc7(x))\n\n return x\n\n\nclass FastRCNNPredictor(nn.Module):\n \"\"\"\n Standard classification + bounding box regression layers\n for Fast R-CNN.\n\n Arguments:\n in_channels (int): number of input channels\n num_classes (int): number of output classes (including background)\n \"\"\"\n\n def __init__(self, in_channels, num_classes):\n super(FastRCNNPredictor, self).__init__()\n self.cls_score = nn.Linear(in_channels, num_classes)\n self.bbox_pred = nn.Linear(in_channels, num_classes * 4)\n\n def forward(self, x):\n if x.dim() == 4:\n assert list(x.shape[2:]) == [1, 1]\n x = x.flatten(start_dim=1)\n scores = self.cls_score(x)\n bbox_deltas = self.bbox_pred(x)\n\n return scores, bbox_deltas\n\n\nmodel_urls = {\n 'fasterrcnn_resnet50_fpn_coco':\n 'https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth',\n}\n\n\ndef fasterrcnn_resnet50_fpn(pretrained=False, progress=True,\n num_classes=91, pretrained_backbone=True, **kwargs):\n \"\"\"\n Constructs a Faster R-CNN model with a ResNet-50-FPN backbone.\n\n The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n image, and should be in ``0-1`` range. Different images can have different sizes.\n\n The behavior of the model changes depending if it is in training or evaluation mode.\n\n During training, the model expects both the input tensors, as well as a targets (list of dictionary),\n containing:\n - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values\n between ``0`` and ``H`` and ``0`` and ``W``\n - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n\n The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n losses for both the RPN and the R-CNN.\n\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a ``List[Dict[Tensor]]``, one for each input image. 
The fields of the ``Dict`` are as\n follows:\n - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values between\n ``0`` and ``H`` and ``0`` and ``W``\n - labels (``Int64Tensor[N]``): the predicted labels for each image\n - scores (``Tensor[N]``): the scores or each prediction\n\n Example::\n\n >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n >>> model.eval()\n >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n >>> predictions = model(x)\n\n Arguments:\n pretrained (bool): If True, returns a model pre-trained on COCO train2017\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n if pretrained:\n # no need to download the backbone if pretrained is set\n pretrained_backbone = False\n backbone = resnet_fpn_backbone('resnet50', pretrained_backbone)\n model = FasterRCNN(backbone, num_classes, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n"
] |
[
[
"torch.nn.Linear"
]
] |
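The single indexed call for this row is `torch.nn.Linear`. A short sketch mirroring the two linear heads of `FastRCNNPredictor` above, fed a random feature batch; the batch size is arbitrary.

```python
import torch
import torch.nn as nn

in_channels, num_classes = 1024, 91
cls_score = nn.Linear(in_channels, num_classes)       # classification logits
bbox_pred = nn.Linear(in_channels, num_classes * 4)   # per-class box regression deltas

x = torch.randn(2, in_channels)
print(cls_score(x).shape, bbox_pred(x).shape)         # torch.Size([2, 91]) torch.Size([2, 364])
```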
PrasadHonrao/python-samples
|
[
"faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a"
] |
[
"data_analysis/matplotlib/matplotlib-histrogram.py"
] |
[
"import matplotlib.pyplot as plt\n\nlife_expetency = [\n 43.828000000000003,\n 76.423000000000002,\n 72.301000000000002,\n 42.731000000000002,\n 75.319999999999993,\n 81.234999999999999,\n 79.828999999999994,\n 75.635000000000005,\n 64.061999999999998,\n 79.441000000000003,\n 56.728000000000002,\n 65.554000000000002,\n 74.852000000000004,\n 50.728000000000002,\n 72.390000000000001,\n 73.004999999999995,\n 52.295000000000002,\n 49.579999999999998,\n 59.722999999999999,\n 50.43,\n 80.653000000000006,\n 44.741000000000007,\n 50.651000000000003,\n 78.552999999999997,\n 72.960999999999999,\n 72.888999999999996,\n 65.152000000000001,\n 46.462000000000003,\n 55.322000000000003,\n 78.781999999999996,\n 48.328000000000003,\n 75.748000000000005,\n 78.272999999999996,\n 76.486000000000004,\n 78.331999999999994,\n 54.790999999999997,\n 72.234999999999999,\n 74.994,\n 71.338000000000022,\n 71.878,\n 51.578999999999994,\n 58.039999999999999,\n 52.947000000000003,\n 79.313000000000002,\n 80.656999999999996,\n 56.734999999999999,\n 59.448,\n 79.406000000000006,\n 60.021999999999998,\n 79.483000000000004,\n 70.259,\n 56.006999999999998,\n 46.388000000000012,\n 60.915999999999997,\n 70.198000000000008,\n 82.207999999999998,\n 73.338000000000022,\n 81.757000000000005,\n 64.698000000000008,\n 70.650000000000006,\n 70.963999999999999,\n 59.545000000000002,\n 78.885000000000005,\n 80.745000000000005,\n 80.546000000000006,\n 72.566999999999993,\n 82.602999999999994,\n 72.534999999999997,\n 54.109999999999999,\n 67.296999999999997,\n 78.623000000000005,\n 77.588000000000022,\n 71.992999999999995,\n 42.591999999999999,\n 45.677999999999997,\n 73.951999999999998,\n 59.443000000000012,\n 48.302999999999997,\n 74.241,\n 54.466999999999999,\n 64.164000000000001,\n 72.801000000000002,\n 76.194999999999993,\n 66.802999999999997,\n 74.543000000000006,\n 71.164000000000001,\n 42.082000000000001,\n 62.069000000000003,\n 52.906000000000013,\n 63.784999999999997,\n 79.762,\n 80.203999999999994,\n 72.899000000000001,\n 56.866999999999997,\n 46.859000000000002,\n 80.195999999999998,\n 75.640000000000001,\n 65.483000000000004,\n 75.536999999999978,\n 71.751999999999995,\n 71.421000000000006,\n 71.688000000000002,\n 75.563000000000002,\n 78.097999999999999,\n 78.746000000000024,\n 76.441999999999993,\n 72.475999999999999,\n 46.241999999999997,\n 65.528000000000006,\n 72.777000000000001,\n 63.061999999999998,\n 74.001999999999995,\n 42.568000000000012,\n 79.971999999999994,\n 74.662999999999997,\n 77.926000000000002,\n 48.158999999999999,\n 49.338999999999999,\n 80.941000000000003,\n 72.396000000000001,\n 58.555999999999997,\n 39.613,\n 80.884,\n 81.701000000000022,\n 74.143000000000001,\n 78.400000000000006,\n 52.517000000000003,\n 70.616,\n 58.420000000000002,\n 69.819000000000003,\n 73.923000000000002,\n 71.777000000000001,\n 51.542000000000002,\n 79.424999999999997,\n 78.242000000000004,\n 76.384,\n 73.747,\n 74.248999999999995,\n 73.421999999999997,\n 62.698,\n 42.383999999999993,\n 43.487000000000002 \n]\n\nplt.hist(life_expetency) #build histogram with default 10 bins\nplt.show()\n\nplt.clf() # clear the cached chart\nplt.hist(life_expetency, bins=15) #build histogram with default 10 bins\nplt.show()\n\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.clf"
]
] |
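A hedged miniature of the histogram example above, using a short made-up sample instead of the full life-expectancy list.

```python
import matplotlib.pyplot as plt

values = [43.8, 76.4, 72.3, 42.7, 75.3, 81.2, 79.8, 64.1, 56.7, 70.2]
plt.hist(values)            # default 10 bins
plt.show()

plt.clf()                   # clear the cached chart before re-plotting
plt.hist(values, bins=15)   # finer binning
plt.show()
```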
Shivankthakur/Rhythm-Search-Engine-for-song-lyrics
|
[
"560382b115b71d04b7ab8a828793537a8a93e3de"
] |
[
"GUI application/app.py"
] |
[
"from flask import Flask, render_template, request, redirect, url_for\r\nfrom nltk.corpus import stopwords\r\nimport nltk\r\nnltk.download('stopwords')\r\nfrom nltk.tokenize import TreebankWordTokenizer\r\nfrom nltk.stem.porter import *\r\nimport numpy\r\nfrom math import log,sqrt\r\nimport pandas as pd\r\nimport timeit\r\nimport pickle\r\nfrom tabulate import tabulate\r\n\r\n#model and query stuff\r\ndef tokenize_sentence(sentence): # Tokenize a string and remove punctuations\r\n#arguments : string\r\n sentence=sentence.lower()\r\n tokenizer=TreebankWordTokenizer()\r\n tokens_list_with_punct = tokenizer.tokenize(sentence.lower())\r\n tokens_list_without_punct=[]\r\n for x in tokens_list_with_punct:\r\n if x.isalpha():\r\n tokens_list_without_punct.append(x)\r\n return tokens_list_without_punct\r\n\r\ndef remove_stopwords(words): # Remove english stop words from a string\r\n#arguments : List of strings\r\n stopwords_list=stopwords.words('english')\r\n filtered_words=[]\r\n for x in words:\r\n if x not in stopwords_list:\r\n filtered_words.append(x)\r\n return filtered_words\r\n\r\ndef stem_words(words_list): # Stem words in a string\r\n#arguments : List of strings\r\n ps=PorterStemmer()\r\n stemmed_words=[]\r\n for x in range(len(words_list)):\r\n stemmed_words.append(ps.stem(words_list[x]))\r\n return stemmed_words\r\n\r\ndef preprocess_sentence(sentence): # Method to call above defined preprocessing tasks on a string\r\n#arguments : string\r\n return stem_words(remove_stopwords(tokenize_sentence(sentence)))\r\n\r\n# actual model definition\r\nclass IR_model():\r\n def __init__(self):\r\n# constructor method\r\n self.__word_dict={} #dictionary of all words in corpus.Each unique word is mapped to an index\r\n self.__word_dict_size=0 #size of word_dict\r\n self.__doc_list_size=0 #number of docs in corpus\r\n self.__score_matrix=None #2D matrix to store tf-idf score\r\n \r\n def __addDocument(self,document): #adds document to the matrix and store tf in cells\r\n # arguments : corpus as a list of tuples ; No return type. \r\n for word in document[1]:\r\n self.__score_matrix[self.__doc_list_size][self.__word_dict[word]]+=1\r\n self.__doc_list_size+=1\r\n\r\n def build_Vector_Space(self,documents): # computes values for cells in vector space\r\n #arguments : corpus as a list of tuples ; No return type. 
\r\n for document in documents: # assign index to unique words\r\n for word in document[1]:\r\n if word not in self.__word_dict:\r\n self.__word_dict[word]=self.__word_dict_size\r\n self.__word_dict_size+=1\r\n\r\n self.__score_matrix=numpy.zeros((len(documents),self.__word_dict_size))\r\n \r\n for document in documents: # assign tf value to cells\r\n self.__addDocument(document)\r\n\r\n idf=numpy.zeros((self.__word_dict_size))\r\n df=numpy.zeros((self.__word_dict_size))\r\n\r\n for word in self.__word_dict: # calculate df of all words in corpus\r\n x=self.__word_dict[word]\r\n for i in range(len(self.__score_matrix)):\r\n if self.__score_matrix[i][x]!=0:\r\n df[x]+=1\r\n\r\n for i in range(self.__word_dict_size): # calculate idf of all words in corpus\r\n idf[i]=log(len(self.__score_matrix)/df[i])\r\n\r\n for i in range(len(self.__score_matrix)):# fill cells with tf-idf score\r\n for j in range(self.__word_dict_size):\r\n if self.__score_matrix[i][j]!=0:\r\n self.__score_matrix[i][j]=(1+log(self.__score_matrix[i][j]))*(idf[j])\r\n \r\n def Search(self,query,documents): # Finds the tf_idf score of query for all docs in corpus and returns it\r\n#arguments : user query as list of terms, corpus as list of tuples\r\n#Return type : list of tuples\r\n query_df=numpy.zeros((self.__word_dict_size))\r\n query_idf=numpy.zeros((self.__word_dict_size))\r\n for word in query: # for each word in query, find df and idf\r\n if word in self.__word_dict:\r\n if query_df[self.__word_dict[word]]==0:\r\n for document in documents:\r\n if word in document[1]:\r\n query_df[self.__word_dict[word]]+=1\r\n query_idf[self.__word_dict[word]]=log(len(self.__score_matrix)/query_df[self.__word_dict[word]])\r\n\r\n query_score=[] # To store score between query and each document\r\n for document in documents: # Finding tf for each term in query for each doc and find score\r\n query_tf=numpy.zeros(self.__word_dict_size)\r\n score=0\r\n for word in query:\r\n if word in self.__word_dict:\r\n if query_tf[self.__word_dict[word]]==0:\r\n for term in document[1]:\r\n if term==word:\r\n query_tf[self.__word_dict[word]]+=1\r\n if query_tf[self.__word_dict[word]] > 0 :\r\n query_tf[self.__word_dict[word]]=1+log(query_tf[self.__word_dict[word]])\r\n score+= query_tf[self.__word_dict[word]] * query_idf[self.__word_dict[word]] \r\n # score(query,doc)=sum of score for all terms in query and doc\r\n query_score.append([(score,document[0])]) # append doc id and score for returning\r\n \r\n return query_score \r\n\r\n#loading dataset and pickled files\r\ndf = pd.read_csv(\"Song.csv\", error_bad_lines=False)\r\n\r\npickle_in = open(\"docs.pickle\", \"rb\")\r\ndocs = pickle.load(pickle_in)\r\npickle_in.close()\r\n\r\npickle_in = open(\"model.pickle\", \"rb\")\r\nModel = pickle.load(pickle_in)\r\npickle_in.close()\r\n\r\n\r\ndef requestResults(name):\r\n start = timeit.default_timer()\r\n\r\n # Preprocess query text\r\n q = name\r\n q=preprocess_sentence(q)\r\n\r\n # Search for query in model\r\n f=Model.Search(q,docs)\r\n\r\n # arrange/select top10 results\r\n isallzeros = 1\r\n f.sort(reverse=True)\r\n top10=[]\r\n for i in range(10):\r\n for j in f[i]:\r\n if j[0] > 0:\r\n isallzeros = 0\r\n if isallzeros == 1 :\r\n return tabulate([' '], headers=['Sorry no match found'], tablefmt='html')\r\n #return 'Sorry, no match found'\r\n for i in range(10):\r\n for j in f[i]:\r\n top10.append(j[1])\r\n\r\n ans=[]\r\n count = 1\r\n\r\n for i in top10:\r\n ans.append([count, df.iloc[i]['Artist Name'],df.iloc[i]['Song Name'],df.iloc[i]['Clean 
Lyrics']])\r\n count += 1\r\n\r\n table = ans\r\n\r\n stop = timeit.default_timer()\r\n return '<body style=\"background-color: #E0FFFF;\">' + \"Top10 Results in: \" + '<b>' + str(stop-start) + \"</b>\" + \" seconds <br><br>\" + tabulate(table, headers=['Rank','Artist Name','Song Name','Lyrics'], tablefmt='html') + \"</body>\"\r\n\r\n\r\n\r\n# start Flask\r\napp = Flask(__name__)\r\n\r\n# render default home webpage\r\[email protected]('/')\r\ndef hello():\r\n return render_template('home.html')\r\n\r\n#when post method detected, redirect to success function\r\[email protected]('/', methods=['POST', 'GET'])\r\ndef get_data():\r\n if request.method == 'POST':\r\n user = request.form['search']\r\n return redirect(url_for('success', name=user))\r\n\r\n#get the data/results for requested query\r\[email protected]('/success/<name>')\r\ndef success(name):\r\n return str(requestResults(name))\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n"
] |
[
[
"pandas.read_csv",
"numpy.zeros"
]
] |
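A self-contained sketch of the two calls indexed for this row, reading an in-memory CSV in place of the real `Song.csv`; the rows below are invented, only the column names come from the code above.

```python
import io
import numpy as np
import pandas as pd

csv_text = (
    "Artist Name,Song Name,Clean Lyrics\n"
    "Artist A,Song One,hello world hello\n"
    "Artist B,Song Two,another lyric line\n"
)
df = pd.read_csv(io.StringIO(csv_text))

score_matrix = np.zeros((len(df), 5))   # docs x vocabulary, as in build_Vector_Space
print(df.shape, score_matrix.shape)
```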
AutumnCrocus/shadow_sim
|
[
"79ad13ff9bd7131c82f269af32a3970f3e4bf2ca"
] |
[
"make_NN_vs_other.py"
] |
[
"from torch.multiprocessing import Pool, Process, set_start_method,cpu_count, RLock\r\n\r\ntry:\r\n set_start_method('spawn')\r\n print(\"spawn is run.\")\r\n #set_start_method('fork') GPU使用時CUDA initializationでerror\r\n #print('fork')\r\nexcept RuntimeError:\r\n pass\r\n\r\nfrom emulator_test import * # importの依存関係により必ず最初にimport\r\nfrom Field_setting import *\r\nfrom Player_setting import *\r\nfrom Policy import *\r\nfrom Game_setting import Game\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nfrom Embedd_Network_model import *\r\nimport copy\r\nimport datetime\r\n# net = New_Dual_Net(100)\r\nimport os\r\n\r\nparser = argparse.ArgumentParser(description='デュアルニューラルネットワーク学習コード')\r\n\r\nparser.add_argument('--decklists', help='使用するデッキリスト')\r\nparser.add_argument('--opponent', help='対戦相手のAIタイプ')\r\nparser.add_argument('--model_name', help='使用モデル')\r\nparser.add_argument('--iteration', help='各組み合わせの対戦回数')\r\nargs = parser.parse_args()\r\ndeck_id_2_name = {0: \"Sword_Aggro\", 1: \"Rune_Earth\", 2: \"Sword\", 3: \"Shadow\", 4: \"Dragon_PDK\", 5: \"Haven\",\r\n 6: \"Blood\", 7: \"Dragon\", 8: \"Forest\", 9: \"Rune\", 10: \"DS_Rune\", -1: \"Forest_Basic\",\r\n -2: \"Sword_Basic\",\r\n -3: \"Rune_Basic\",\r\n -4: \"Dragon_Basic\", -5: \"FOREST_Basic\", -6: \"Blood_Basic\", -7: \"Haven_Basic\",\r\n -8: \"Portal_Basic\",\r\n 100: \"Test\",\r\n -9: \"Spell-Rune\", 11: \"PtP-Forest\", 12: \"Mid-Shadow\", 13: \"Neutral-Blood\"}\r\n\r\nkey_2_tsv_name = {0: [\"Sword_Aggro.tsv\", \"SWORD\"], 1: [\"Rune_Earth.tsv\", \"RUNE\"], 2: [\"Sword.tsv\", \"SWORD\"],\r\n 3: [\"New-Shadow.tsv\", \"SHADOW\"], 4: [\"Dragon_PDK.tsv\", \"DRAGON\"],\r\n 5: [\"Test-Haven.tsv\", \"HAVEN\"],\r\n 6: [\"Blood.tsv\", \"BLOOD\"], 7: [\"Dragon.tsv\", \"DRAGON\"], 8: [\"Forest.tsv\", \"FOREST\"],\r\n 9: [\"SpellBoost-Rune.tsv\", \"RUNE\"], 10: [\"Dimension_Shift_Rune.tsv\", \"RUNE\"],\r\n 11: [\"PtP_Forest.tsv\", \"FOREST\"], 12: [\"Mid_Shadow.tsv\", \"SHADOW\"],\r\n 13: [\"Neutral_Blood.tsv\", \"BLOOD\"]}\r\ndef multi_battle(episode_data):\r\n episode = episode_data[0]\r\n print(episode)\r\n deck_type1,deck_type2 = episode_data[3]\r\n deck_locations = episode_data[4]\r\n iteration = episode_data[-1]\r\n win = 0\r\n lose = 0\r\n libout = 0\r\n G = Game()\r\n first = 0\r\n for i in range(iteration):\r\n f = Field(5)\r\n p1 = episode_data[1].get_copy(None)\r\n p2 = episode_data[2].get_copy(None)\r\n d1 = tsv_to_deck(key_2_tsv_name[deck_type1][0])\r\n d1.set_leader_class(key_2_tsv_name[deck_type1][1])\r\n d2 = tsv_to_deck(key_2_tsv_name[deck_type2][0])\r\n d2.set_leader_class(key_2_tsv_name[deck_type2][1])\r\n d1.shuffle()\r\n d2.shuffle()\r\n p1.deck = d1\r\n p2.deck = d2\r\n\r\n f.players = [p1, p2] if i%2==0 else [p2,p1]\r\n\r\n p1.field = f\r\n p2.field = f\r\n f.players[0].draw(f.players[0].deck, 3)\r\n f.players[1].draw(f.players[1].deck, 3)\r\n p1.first = i%2 == 0\r\n p2.first = i%2 == 1\r\n p1.player_num = i%2\r\n p2.player_num = 1- i%2\r\n x1 = datetime.datetime.now()\r\n #train_data, reward = G.start_for_dual(f, virtual_flg=True, target_player_num=i % 2)\r\n w, l, lib_num, turn = G.start(f, virtual_flg= True)\r\n win = win + w if i%2==0 else win+l\r\n first += w\r\n\r\n result_data = (deck_locations,[win/100,first/100])\r\n\r\n return result_data\r\n episode = episode_data[0]\r\n #print(\"start:{}\".format(episode + 1))\r\n\r\n #if random.random() < 0.05:\r\n # p1 = Player(9,True)\r\n #if random.random() < 0.05:\r\n # p2 = Player(9, False)\r\n if deck_flg is None:\r\n deck_type1 = 
random.choice(list(key_2_tsv_name.keys()))\r\n deck_type2 = random.choice(list(key_2_tsv_name.keys()))\r\n else:\r\n deck_type1 = deck_flg\r\n deck_type2 = deck_flg\r\n d1 = tsv_to_deck(key_2_tsv_name[deck_type1][0])\r\n d1.set_leader_class(key_2_tsv_name[deck_type1][1])\r\n d2 = tsv_to_deck(key_2_tsv_name[deck_type2][0])\r\n d2.set_leader_class(key_2_tsv_name[deck_type2][1])\r\n d1.shuffle()\r\n d2.shuffle()\r\n p1.deck = d1\r\n p2.deck = d2\r\n f.players = [p1, p2]\r\n p1.field = f\r\n p2.field = f\r\n x1 = datetime.datetime.now()\r\n train_data, reward = G.start_for_dual(f, virtual_flg=True, target_player_num=episode % 2)\r\n\r\ndef run_main():\r\n deck_lists = list(map(int,args.decklists.split(\",\"))) if args.decklists is not None else None\r\n if deck_lists is None:\r\n deck_lists = list(deck_id_2_name.keys())\r\n else:\r\n assert all(key in deck_id_2_name for key in deck_lists)\r\n if deck_lists == [0, 1, 4, 5, 10, 12]:\r\n deck_lists = [0, 1, 4, 12, 5, 10]\r\n mylogger.info(\"deck_lists:{}\".format(deck_lists))\r\n D = [Deck() for i in range(len(deck_lists))]\r\n deck_index = 0\r\n # sorted_keys = sorted(list(deck_id_2_name.keys()))\r\n # for i in sorted_keys:\r\n # if i not in deck_lists:\r\n # continue\r\n for i in deck_lists:\r\n mylogger.info(\"{}(deck_id:{}):{}\".format(deck_index, i, key_2_tsv_name[i]))\r\n D[deck_index] = tsv_to_deck(key_2_tsv_name[i][0])\r\n D[deck_index].set_leader_class(key_2_tsv_name[i][1])\r\n deck_index += 1\r\n Results = {}\r\n list_range = range(len(deck_lists))\r\n #print(list(itertools.product(list_range,list_range)))\r\n\r\n Player1 = Player(9, True, policy=New_Dual_NN_Non_Rollout_OM_ISMCTSPolicy(model_name=args.model_name),\r\n mulligan=Min_cost_mulligan_policy())\r\n\r\n if args.opponent is not None:\r\n if args.model_name is not None:\r\n if args.opponent == \"Greedy\":\r\n Player2 = Player(9, True, policy=NN_GreedyPolicy(model_name=args.model_name),\r\n mulligan=Min_cost_mulligan_policy())\r\n elif args.opponent == \"MCTS\":\r\n Player2 = Player(9, True, policy=New_Dual_NN_Non_Rollout_OM_ISMCTSPolicy(model_name=args.model_name),\r\n mulligan=Min_cost_mulligan_policy())\r\n else:\r\n Player2 = Player(9, True, policy=Opponent_Modeling_MCTSPolicy(),\r\n mulligan=Min_cost_mulligan_policy())\r\n else:\r\n Player2 = Player(9, True, policy=AggroPolicy(),\r\n mulligan=Min_cost_mulligan_policy())\r\n\r\n Player1.name = \"Alice\"\r\n Player2.name = \"Bob\"\r\n iteration = int(args.iteration) if args.iteration is not None else 10\r\n deck_list_len = len(deck_lists)\r\n iter_data = [(deck_list_len*i+j,Player1,\r\n Player2,(i,j),(deck_lists[i],deck_lists[j]),iteration) for i,j in itertools.product(list_range,list_range)]\r\n pool = Pool(3) # 最大プロセス数:8\r\n # memory = pool.map(preparation, iter_data)\r\n result = pool.map(multi_battle, iter_data)\r\n #result = list(tqdm(result, total=len(list_range)**2))\r\n pool.close() # add this.\r\n pool.terminate() # add this.\r\n for data in result:\r\n Results[data[0]] = data[1]\r\n print(Results)\r\nif __name__ == \"__main__\":\r\n run_main()\r\n"
] |
[
[
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.Pool"
]
] |
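A hedged sketch of the `torch.multiprocessing` pattern the script above starts with, using a trivial worker in place of the full game simulation; the pool size and inputs are arbitrary.

```python
from torch.multiprocessing import Pool, set_start_method

def worker(x):
    return x * x   # stands in for multi_battle

if __name__ == "__main__":
    try:
        set_start_method("spawn")   # same guard as the original script
    except RuntimeError:
        pass                        # start method may already be fixed
    with Pool(2) as pool:
        print(pool.map(worker, range(5)))   # [0, 1, 4, 9, 16]
```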
johnnylee/scikits.pulsefit
|
[
"d07571d524d974f52a863cd96a823fce6f1fed1e"
] |
[
"scikits/pulsefit/flagger.py"
] |
[
"from __future__ import print_function\n\nimport numpy as np\n\n\nclass Flagger(object):\n \"\"\"Flagger flags pulses based on the value of reduced chi^2.\"\"\"\n def __init__(self, p_err_len, sigma2, chi2red_max, debug=False):\n \"\"\"Arguments:\n p_err_len -- The length along each pulse used to compute the\n reduced chi^2 and residual.\n sigma2 -- The variance in the data.\n chi2red_max -- Maximum acceptable reduced chi^2.\n \"\"\"\n self.p_err_len = p_err_len\n self.sigma2 = sigma2\n self.chi2red_max = chi2red_max\n self.debug = debug\n\n def flag(self, block):\n if self.debug:\n print(\"\\nFlagging...\")\n\n # The number of degrees of freedom. This is equal to\n # (# observations) - (# fitted parameters) - 1\n # For each pulse, there are two fitted parameters: the\n # offset and the amplitude.\n nu = self.p_err_len - 3\n\n block.flags = np.empty(block.inds.size, dtype=np.uint8)\n block.chi2red = np.empty_like(block.amps)\n\n for i, idx1 in enumerate(block.inds):\n res = block.res[idx1:idx1 + self.p_err_len]\n block.chi2red[i] = np.sum(res**2) / (nu * self.sigma2)\n if block.chi2red[i] > self.chi2red_max:\n block.flags[i] = 1\n elif block.amps[i] <= 0:\n block.flags[i] = 2\n else:\n block.flags[i] = 0\n\n if self.debug:\n for flag in block.flags:\n if flag == 0:\n print(\" OK\")\n elif flag == 1:\n print(\" Reduced chi^2 too large:\", max(block.chi2red))\n elif flag == 2:\n print(\" Negative amplitude:\", max(block.amps))\n else:\n print(\" Unknown error.\")\n"
] |
[
[
"numpy.empty_like",
"numpy.sum",
"numpy.empty"
]
] |
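A short sketch of the numpy calls indexed here, reproducing the reduced chi^2 and flag bookkeeping from `Flagger.flag` with toy numbers; the residuals, amplitudes, and threshold are made up and the flag scheme is simplified.

```python
import numpy as np

res = np.array([0.1, -0.2, 0.05, 0.0, 0.15])    # toy residuals over p_err_len samples
amps = np.array([1.0, -0.3])                    # toy pulse amplitudes
sigma2, nu = 0.01, len(res) - 3                 # variance and degrees of freedom

flags = np.empty(amps.size, dtype=np.uint8)     # one flag per pulse
chi2red = np.empty_like(amps)
chi2red[:] = np.sum(res**2) / (nu * sigma2)     # same reduced chi^2 formula as above
flags[:] = (chi2red > 10.0) | (amps <= 0)       # 1 = rejected, 0 = OK (simplified)
print(chi2red, flags)
```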
atch841/TransUNet
|
[
"fa952b2f39e5dbc189fd3e0e6481bd1c74eb4c2c"
] |
[
"networks/ssseg/backbones/resnest.py"
] |
[
"'''\nFunction:\n Implementation of ResNeSt\nAuthor:\n Zhenchao Jin\n'''\nimport os\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom .resnet import ResNet\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .builder import BuildNormalization, BuildActivation\n\n\n'''model urls'''\nmodel_urls = {\n 'resnest50': 'https://download.openmmlab.com/pretrain/third_party/resnest50_d2-7497a55b.pth',\n 'resnest101': 'https://download.openmmlab.com/pretrain/third_party/resnest101_d2-f3b931b2.pth',\n 'resnest200': 'https://download.openmmlab.com/pretrain/third_party/resnest200_d2-ca88e41f.pth',\n}\n\n\n'''Radix Softmax module'''\nclass RSoftmax(nn.Module):\n def __init__(self, radix, groups):\n super(RSoftmax, self).__init__()\n self.radix = radix\n self.groups = groups\n '''forward'''\n def forward(self, x):\n batch = x.size(0)\n if self.radix > 1:\n x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)\n x = F.softmax(x, dim=1)\n x = x.reshape(batch, -1)\n else:\n x = torch.sigmoid(x)\n return x\n\n\n'''Split-Attention Conv2d in ResNeSt'''\nclass SplitAttentionConv2d(nn.Module):\n def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, radix=2, reduction_factor=4, norm_cfg=None, act_cfg=None, **kwargs):\n super(SplitAttentionConv2d, self).__init__()\n inter_channels = max(in_channels * radix // reduction_factor, 32)\n self.radix = radix\n self.conv = nn.Conv2d(in_channels, channels * radix, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups * radix, bias=False)\n self.bn0 = BuildNormalization(norm_cfg['type'], (channels * radix, norm_cfg['opts']))\n self.relu = BuildActivation(act_cfg['type'], **act_cfg['opts'])\n self.fc1 = nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0, groups=groups)\n self.bn1 = BuildNormalization(norm_cfg['type'], (inter_channels, norm_cfg['opts']))\n self.fc2 = nn.Conv2d(inter_channels, channels * radix, kernel_size=1, stride=1, padding=0, groups=groups)\n self.rsoftmax = RSoftmax(radix, groups)\n '''forward'''\n def forward(self, x):\n x = self.conv(x)\n x = self.bn0(x)\n x = self.relu(x)\n batch, rchannel = x.shape[:2]\n batch = x.size(0)\n if self.radix > 1:\n splits = x.view(batch, self.radix, -1, *x.shape[2:])\n gap = splits.sum(dim=1)\n else:\n gap = x\n gap = F.adaptive_avg_pool2d(gap, 1)\n gap = self.fc1(gap)\n gap = self.bn1(gap)\n gap = self.relu(gap)\n atten = self.fc2(gap)\n atten = self.rsoftmax(atten).view(batch, -1, 1, 1)\n if self.radix > 1:\n attens = atten.view(batch, self.radix, -1, *atten.shape[2:])\n out = torch.sum(attens * splits, dim=1)\n else:\n out = atten * x\n return out.contiguous()\n\n\n'''Bottleneck block for ResNeSt'''\nclass Bottleneck(_Bottleneck):\n expansion = 4\n def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, radix=2, reduction_factor=4, use_avg_after_block_conv2=True, **kwargs):\n super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n if groups == 1: width = planes\n else: width = math.floor(planes * (base_width / base_channels)) * groups\n norm_cfg, act_cfg = kwargs['norm_cfg'], kwargs['act_cfg']\n self.use_avg_after_block_conv2 = use_avg_after_block_conv2 and self.stride > 1\n self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = BuildNormalization(norm_cfg['type'], (width, norm_cfg['opts']))\n self.conv2 = SplitAttentionConv2d(\n in_channels=width, \n 
channels=width, \n kernel_size=3, \n stride=1 if self.use_avg_after_block_conv2 else self.stride, \n padding=kwargs['dilation'],\n dilation=kwargs['dilation'],\n groups=groups,\n radix=radix,\n reduction_factor=reduction_factor,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n )\n delattr(self, 'bn2')\n self.conv3 = nn.Conv2d(width, planes * self.expansion, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = BuildNormalization(norm_cfg['type'], (planes * self.expansion, norm_cfg['opts']))\n if self.use_avg_after_block_conv2: self.avd_layer = nn.AvgPool2d(3, self.stride, padding=1)\n '''forward'''\n def forward(self, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n if self.use_avg_after_block_conv2: out = self.avd_layer(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None: identity = self.downsample(x)\n out += identity\n out = self.relu(out)\n return out\n\n\n'''ResNeSt'''\nclass ResNeSt(ResNet):\n arch_settings = {\n 50: (Bottleneck, (3, 4, 6, 3)),\n 101: (Bottleneck, (3, 4, 23, 3)),\n 152: (Bottleneck, (3, 8, 36, 3)),\n 200: (Bottleneck, (3, 24, 36, 3))\n }\n def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, use_avg_after_block_conv2=True, **kwargs):\n self.extra_args_for_makelayer = {\n 'radix': radix,\n 'groups': groups,\n 'base_width': base_width,\n 'reduction_factor': reduction_factor,\n 'base_channels': kwargs['base_channels'],\n 'use_avg_after_block_conv2': use_avg_after_block_conv2,\n }\n super(ResNeSt, self).__init__(**kwargs)\n '''make res layer'''\n def makelayer(self, block, inplanes, planes, num_blocks, stride=1, dilation=1, contract_dilation=True, use_avg_for_downsample=False, norm_cfg=None, act_cfg=None, **kwargs):\n kwargs.update(self.extra_args_for_makelayer)\n downsample = None\n dilations = [dilation] * num_blocks\n if contract_dilation and dilation > 1: dilations[0] = dilation // 2\n if stride != 1 or inplanes != planes * block.expansion:\n if use_avg_for_downsample:\n downsample = nn.Sequential(\n nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),\n nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=1, padding=0, bias=False),\n BuildNormalization(norm_cfg['type'], (planes * block.expansion, norm_cfg['opts']))\n )\n else:\n downsample = nn.Sequential(\n nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, padding=0, bias=False),\n BuildNormalization(norm_cfg['type'], (planes * block.expansion, norm_cfg['opts']))\n )\n layers = []\n layers.append(block(inplanes, planes, stride=stride, dilation=dilations[0], downsample=downsample, norm_cfg=norm_cfg, act_cfg=act_cfg, **kwargs))\n self.inplanes = planes * block.expansion\n for i in range(1, num_blocks): layers.append(block(planes * block.expansion, planes, stride=1, dilation=dilations[i], norm_cfg=norm_cfg, act_cfg=act_cfg, **kwargs))\n return nn.Sequential(*layers)\n\n\n'''build resnest'''\ndef BuildResNeSt(resnest_type, **kwargs):\n # assert whether support\n supported_resnests = {\n 'resnest50': {'depth': 50},\n 'resnest101': {'depth': 101},\n 'resnest152': {'depth': 152},\n 'resnest200': {'depth': 200},\n }\n assert resnest_type in supported_resnests, 'unsupport the resnest_type %s...' 
% resnest_type\n # parse args\n default_args = {\n 'radix': 2,\n 'groups': 1,\n 'outstride': 8,\n 'base_width': 4,\n 'use_stem': True,\n 'norm_cfg': None,\n 'in_channels': 3,\n 'pretrained': True,\n 'base_channels': 64,\n 'stem_channels': 128,\n 'reduction_factor': 4,\n 'contract_dilation': True,\n 'out_indices': (0, 1, 2, 3),\n 'pretrained_model_path': '',\n 'use_avg_for_downsample': True,\n 'use_avg_after_block_conv2': True,\n 'act_cfg': {'type': 'relu', 'opts': {'inplace': True}},\n }\n for key, value in kwargs.items():\n if key in default_args: default_args.update({key: value})\n # obtain args for instanced resnest\n resnest_args = supported_resnests[resnest_type]\n resnest_args.update(default_args)\n # obtain the instanced resnest\n model = ResNeSt(**resnest_args)\n # load weights of pretrained model\n if default_args['pretrained'] and os.path.exists(default_args['pretrained_model_path']):\n checkpoint = torch.load(default_args['pretrained_model_path'])\n if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict']\n else: state_dict = checkpoint\n model.load_state_dict(state_dict, strict=False)\n elif default_args['pretrained']:\n checkpoint = model_zoo.load_url(model_urls[resnest_type])\n if 'state_dict' in checkpoint: state_dict = checkpoint['state_dict']\n else: state_dict = checkpoint\n model.load_state_dict(state_dict, strict=False)\n # return the model\n return model"
] |
[
[
"torch.nn.Sequential",
"torch.nn.functional.softmax",
"torch.sigmoid",
"torch.load",
"torch.nn.Conv2d",
"torch.sum",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.AvgPool2d",
"torch.utils.model_zoo.load_url"
]
] |
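A hedged sketch of the split-attention arithmetic from `SplitAttentionConv2d` above, with plain `Conv2d` and `relu` standing in for the repository's normalization/activation builders; checkpoint loading (`torch.load`, `model_zoo.load_url`) is omitted because it needs real weight files, and all sizes are arbitrary.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

radix, channels, batch = 2, 8, 2
conv = nn.Conv2d(3, channels * radix, kernel_size=3, padding=1, bias=False)
fc1 = nn.Conv2d(channels, 32, kernel_size=1)
fc2 = nn.Conv2d(32, channels * radix, kernel_size=1)

x = conv(torch.randn(batch, 3, 16, 16))                    # (2, 16, 16, 16)
splits = x.view(batch, radix, -1, *x.shape[2:])            # split channels into radix groups
gap = F.adaptive_avg_pool2d(splits.sum(dim=1), 1)          # global per-channel statistics
atten = fc2(F.relu(fc1(gap)))                              # (2, channels*radix, 1, 1)
atten = F.softmax(atten.view(batch, 1, radix, -1).transpose(1, 2), dim=1)  # RSoftmax, groups=1
atten = atten.reshape(batch, -1).view(batch, radix, -1, 1, 1)
out = torch.sum(atten * splits, dim=1)                     # weighted sum over the radix groups
print(out.shape)                                           # torch.Size([2, 8, 16, 16])
```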
naitmare01/Adventofcode
|
[
"34f2832fa7a18b76cf9827890632740c6f60679c"
] |
[
"2016/day4/day4.py"
] |
[
"import argparse\nimport pandas as pd\nfrom collections import Counter\n\n\ndef arguments():\n # Handle command line arguments\n parser = argparse.ArgumentParser(description='Adventofcode.')\n parser.add_argument('-f', '--file', required=True)\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = arguments()\n\n with open(args.file) as file:\n codes = file.read().strip()\n codes = codes.splitlines()\n\n def top_five(x):\n d = dict(Counter(''.join(sorted(x))))\n s = sorted(d.items(), key=lambda x: (-x[1], x[0]))\n return (''.join([i[0] for i in s[:5]]))\n\n def decrypt(encrypted_text):\n 'http://programeveryday.com/post/implementing-a-basic-caesar-cipher-in-python/'\n \"\"\"Encrypt the string and return the ciphertext\"\"\"\n key = 'abcdefghijklmnopqrstuvwxyz'\n result = ''\n e_room = encrypted_text[:-11]\n d_room = []\n c_shift = int(encrypted_text[-10:-7])\n\n for word in e_room.split('-'):\n for line in word:\n i = (key.index(line) + c_shift) % 26\n result += key[i]\n d_room.append(result)\n result = ''\n return(\" \".join(d_room))\n\n # Part 1:\n df = pd.read_csv(args.file, names=[\"raw_strings\"])\n df[\"checksum\"] = df[\"raw_strings\"].str.extract('\\[(\\w+)\\]')\n df[\"sectID\"] = df[\"raw_strings\"].str.extract('-(\\d+)\\[')\n df[\"string\"] = df.raw_strings.str[:-11]\n df[\"string\"] = df[\"string\"].str.replace('-', '')\n df.sectID = pd.to_numeric(df.sectID)\n df[\"top_five\"] = df[\"string\"].apply(top_five)\n print(df.loc[df.checksum == df.top_five]['sectID'].sum())\n\n # Part 2:\n df[\"room\"] = df[\"raw_strings\"].apply(decrypt)\n print(df.loc[df['room'].str.contains('north')]['sectID'])\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.read_csv",
"pandas.to_numeric"
]
] |
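A minimal sketch of the pandas pattern used in day4.py above: regex extraction into new columns with `str.extract`, numeric conversion with `pd.to_numeric`, then boolean filtering. The two sample room strings are assumed example inputs, not the puzzle input file.

```python
# Illustration of the extract / to_numeric / filter flow on in-memory data.
import pandas as pd

df = pd.DataFrame({"raw_strings": ["aaaaa-bbb-z-y-x-123[abxyz]",
                                   "a-b-c-d-e-f-g-h-987[abcde]"]})
# pull the bracketed checksum and the numeric sector ID out of each string
df["checksum"] = df["raw_strings"].str.extract(r"\[(\w+)\]", expand=False)
df["sectID"] = pd.to_numeric(df["raw_strings"].str.extract(r"-(\d+)\[", expand=False))
# keep only rows whose checksum matches, then sum their sector IDs
print(df.loc[df["checksum"] == "abxyz", "sectID"].sum())  # -> 123
```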
ek9852/tensorflow
|
[
"2c687ec4d9f824f9827d79a205aa45b84727ac64"
] |
[
"tensorflow/python/framework/test_util.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=invalid-name\n\"\"\"Test utils for tensorflow.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom collections import OrderedDict\nimport contextlib\nimport functools\nimport gc\nimport itertools\nimport math\nimport os\nimport random\nimport re\nimport tempfile\nimport threading\nimport time\nimport unittest\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport six\n\nfrom google.protobuf import descriptor_pool\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.python.client import pywrap_tf_session\nfrom tensorflow.python.client import session\nfrom tensorflow.python.compat.compat import forward_compatibility_horizon\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import gpu_util\nfrom tensorflow.python.framework import importer\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import tfrt_utils\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import control_flow_util_v2\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import summary_ops_v2\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged import ragged_tensor_value\nfrom tensorflow.python.platform import _pywrap_stacktrace_handler\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.util import _pywrap_util_port\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import deprecation\nfrom 
tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.compat import collections_abc\nfrom tensorflow.python.util.protobuf import compare\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# If the below import is made available through the BUILD rule, then this\n# function is overridden and will instead return True and cause Tensorflow\n# graphs to be compiled with XLA.\ndef is_xla_enabled():\n return False\n\n\ntry:\n from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import\nexcept Exception: # pylint: disable=broad-except\n pass\n\n\n# Uses the same mechanism as above to selectively enable/disable MLIR\n# compilation.\ndef is_mlir_bridge_enabled():\n return None\n\n\ntry:\n from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import\nexcept ImportError:\n try:\n from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import\n except ImportError:\n pass\n\n\ndef _get_object_count_by_type(exclude=()):\n return (\n collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -\n collections.Counter([type(obj).__name__ for obj in exclude]))\n\n\n@tf_export(\"test.gpu_device_name\")\ndef gpu_device_name():\n \"\"\"Returns the name of a GPU device if available or the empty string.\"\"\"\n for x in device_lib.list_local_devices():\n if x.device_type == \"GPU\":\n return compat.as_str(x.name)\n return \"\"\n\n\ndef assert_ops_in_graph(expected_ops, graph):\n \"\"\"Assert all expected operations are found.\n\n Args:\n expected_ops: `dict<string, string>` of op name to op type.\n graph: Graph to check.\n\n Returns:\n `dict<string, node>` of node name to node.\n\n Raises:\n ValueError: If the expected ops are not present in the graph.\n \"\"\"\n actual_ops = {}\n gd = graph.as_graph_def()\n for node in gd.node:\n if node.name in expected_ops:\n if expected_ops[node.name] != node.op:\n raise ValueError(\"Expected op for node %s is different. %s vs %s\" %\n (node.name, expected_ops[node.name], node.op))\n actual_ops[node.name] = node\n if set(expected_ops.keys()) != set(actual_ops.keys()):\n raise ValueError(\"Not all expected ops are present. Expected %s, found %s\" %\n (expected_ops.keys(), actual_ops.keys()))\n return actual_ops\n\n\n@tf_export(\"test.assert_equal_graph_def\", v1=[])\ndef assert_equal_graph_def_v2(expected, actual):\n \"\"\"Asserts that two `GraphDef`s are (mostly) the same.\n\n Compares two `GraphDef` protos for equality, ignoring versions and ordering of\n nodes, attrs, and control inputs. Node names are used to match up nodes\n between the graphs, so the naming of nodes must be consistent. 
This function\n ignores randomized attribute values that may appear in V2 checkpoints.\n\n Args:\n expected: The `GraphDef` we expected.\n actual: The `GraphDef` we have.\n\n Raises:\n AssertionError: If the `GraphDef`s do not match.\n TypeError: If either argument is not a `GraphDef`.\n \"\"\"\n assert_equal_graph_def(actual, expected, checkpoint_v2=True,\n hash_table_shared_name=True)\n\n\n@tf_export(v1=[\"test.assert_equal_graph_def\"])\ndef assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,\n hash_table_shared_name=False):\n \"\"\"Asserts that two `GraphDef`s are (mostly) the same.\n\n Compares two `GraphDef` protos for equality, ignoring versions and ordering of\n nodes, attrs, and control inputs. Node names are used to match up nodes\n between the graphs, so the naming of nodes must be consistent.\n\n Args:\n actual: The `GraphDef` we have.\n expected: The `GraphDef` we expected.\n checkpoint_v2: boolean determining whether to ignore randomized attribute\n values that appear in V2 checkpoints.\n hash_table_shared_name: boolean determining whether to ignore randomized\n shared_names that appear in HashTableV2 op defs.\n\n Raises:\n AssertionError: If the `GraphDef`s do not match.\n TypeError: If either argument is not a `GraphDef`.\n \"\"\"\n assert_equal_graph_def(actual, expected, checkpoint_v2,\n hash_table_shared_name)\n\n\ndef assert_equal_graph_def(actual, expected, checkpoint_v2=False,\n hash_table_shared_name=False):\n if not isinstance(actual, graph_pb2.GraphDef):\n raise TypeError(\"Expected tf.GraphDef for actual, got %s\" %\n type(actual).__name__)\n if not isinstance(expected, graph_pb2.GraphDef):\n raise TypeError(\"Expected tf.GraphDef for expected, got %s\" %\n type(expected).__name__)\n\n if checkpoint_v2:\n _strip_checkpoint_v2_randomized(actual)\n _strip_checkpoint_v2_randomized(expected)\n\n if hash_table_shared_name:\n _strip_hash_table_shared_name(actual)\n _strip_hash_table_shared_name(expected)\n\n diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),\n expected.SerializeToString())\n if diff:\n raise AssertionError(compat.as_str(diff))\n\n\ndef assert_meta_graph_protos_equal(tester, a, b):\n \"\"\"Compares MetaGraphDefs `a` and `b` in unit test class `tester`.\"\"\"\n # Carefully check the collection_defs\n tester.assertEqual(set(a.collection_def), set(b.collection_def))\n collection_keys = a.collection_def.keys()\n for k in collection_keys:\n a_value = a.collection_def[k]\n b_value = b.collection_def[k]\n proto_type = ops.get_collection_proto_type(k)\n if proto_type:\n a_proto = proto_type()\n b_proto = proto_type()\n # Number of entries in the collections is the same\n tester.assertEqual(\n len(a_value.bytes_list.value), len(b_value.bytes_list.value))\n for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,\n b_value.bytes_list.value):\n a_proto.ParseFromString(a_value_item)\n b_proto.ParseFromString(b_value_item)\n tester.assertProtoEquals(a_proto, b_proto)\n else:\n tester.assertEquals(a_value, b_value)\n # Compared the fields directly, remove their raw values from the\n # proto comparison below.\n a.ClearField(\"collection_def\")\n b.ClearField(\"collection_def\")\n\n # Check the graph_defs.\n assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)\n # Check graph_def versions (ignored by assert_equal_graph_def).\n tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)\n # Compared the fields directly, remove their raw values from the\n # proto comparison below.\n 
a.ClearField(\"graph_def\")\n b.ClearField(\"graph_def\")\n\n tester.assertProtoEquals(a, b)\n\n\n# Matches attributes named via _SHARDED_SUFFIX in\n# tensorflow/python/training/saver.py\n_SHARDED_SAVE_OP_PATTERN = \"_temp_[0-9a-z]{32}/part\"\n\n\ndef _strip_checkpoint_v2_randomized(graph_def):\n for node in graph_def.node:\n delete_keys = []\n for attr_key in node.attr:\n attr_tensor_value = node.attr[attr_key].tensor\n if attr_tensor_value and len(attr_tensor_value.string_val) == 1:\n attr_tensor_string_value = attr_tensor_value.string_val[0]\n if (attr_tensor_string_value and\n re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),\n attr_tensor_string_value)):\n delete_keys.append(attr_key)\n for attr_key in delete_keys:\n del node.attr[attr_key]\n\n\n_TABLE_SHARED_NAME_PATTERN = r\"hash_table_[0-9a-z\\-]+\"\n\n\ndef _strip_hash_table_shared_name(graph_def):\n for node in graph_def.node:\n delete_keys = []\n if node.op == \"HashTableV2\" and \"shared_name\" in node.attr:\n if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),\n node.attr[\"shared_name\"].s):\n delete_keys.append(\"shared_name\")\n for attr_key in delete_keys:\n del node.attr[attr_key]\n\n\ndef IsGoogleCudaEnabled():\n return _pywrap_util_port.IsGoogleCudaEnabled()\n\n\ndef IsBuiltWithROCm():\n return _pywrap_util_port.IsBuiltWithROCm()\n\n\ndef IsBuiltWithXLA():\n return _pywrap_util_port.IsBuiltWithXLA()\n\n\ndef IsBuiltWithNvcc():\n return _pywrap_util_port.IsBuiltWithNvcc()\n\n\ndef GpuSupportsHalfMatMulAndConv():\n return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()\n\n\ndef IsMklEnabled():\n return _pywrap_util_port.IsMklEnabled()\n\n\ndef InstallStackTraceHandler():\n _pywrap_stacktrace_handler.InstallStacktraceHandler()\n\n\ndef NHWCToNCHW(input_tensor):\n \"\"\"Converts the input from the NHWC format to NCHW.\n\n Args:\n input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape\n\n Returns:\n converted tensor or shape array\n \"\"\"\n # tensor dim -> new axis order\n new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}\n if isinstance(input_tensor, ops.Tensor):\n ndims = input_tensor.shape.ndims\n return array_ops.transpose(input_tensor, new_axes[ndims])\n else:\n ndims = len(input_tensor)\n return [input_tensor[a] for a in new_axes[ndims]]\n\n\ndef NHWCToNCHW_VECT_C(input_shape_or_tensor):\n \"\"\"Transforms the input from the NHWC layout to NCHW_VECT_C layout.\n\n Note: Does not include quantization or type conversion steps, which should\n be applied afterwards.\n\n Args:\n input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape\n\n Returns:\n tensor or shape array transformed into NCHW_VECT_C\n\n Raises:\n ValueError: if last dimension of `input_shape_or_tensor` is not evenly\n divisible by 4.\n \"\"\"\n permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}\n is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)\n temp_shape = (\n input_shape_or_tensor.shape.as_list()\n if is_tensor else input_shape_or_tensor)\n if temp_shape[-1] % 4 != 0:\n raise ValueError(\n \"Last dimension of input must be evenly divisible by 4 to convert to \"\n \"NCHW_VECT_C.\")\n temp_shape[-1] //= 4\n temp_shape.append(4)\n permutation = permutations[len(temp_shape)]\n if is_tensor:\n t = array_ops.reshape(input_shape_or_tensor, temp_shape)\n return array_ops.transpose(t, permutation)\n else:\n return [temp_shape[a] for a in permutation]\n\n\ndef NCHW_VECT_CToNHWC(input_shape_or_tensor):\n \"\"\"Transforms the input from the NCHW_VECT_C layout to NHWC layout.\n\n Note: Does not 
include de-quantization or type conversion steps, which should\n be applied beforehand.\n\n Args:\n input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape\n\n Returns:\n tensor or shape array transformed into NHWC\n\n Raises:\n ValueError: if last dimension of `input_shape_or_tensor` is not 4.\n \"\"\"\n permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}\n is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)\n input_shape = (\n input_shape_or_tensor.shape.as_list()\n if is_tensor else input_shape_or_tensor)\n if input_shape[-1] != 4:\n raise ValueError(\"Last dimension of NCHW_VECT_C must be 4.\")\n permutation = permutations[len(input_shape)]\n nhwc_shape = [input_shape[a] for a in permutation[:-1]]\n nhwc_shape[-1] *= input_shape[-1]\n if is_tensor:\n t = array_ops.transpose(input_shape_or_tensor, permutation)\n return array_ops.reshape(t, nhwc_shape)\n else:\n return nhwc_shape\n\n\ndef NCHWToNHWC(input_tensor):\n \"\"\"Converts the input from the NCHW format to NHWC.\n\n Args:\n input_tensor: a 4- or 5-D tensor, or an array representing shape\n\n Returns:\n converted tensor or shape array\n \"\"\"\n # tensor dim -> new axis order\n new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}\n if isinstance(input_tensor, ops.Tensor):\n ndims = input_tensor.shape.ndims\n return array_ops.transpose(input_tensor, new_axes[ndims])\n else:\n ndims = len(input_tensor)\n return [input_tensor[a] for a in new_axes[ndims]]\n\n\ndef skip_if(condition):\n \"\"\"Skips the decorated function if condition is or evaluates to True.\n\n Args:\n condition: Either an expression that can be used in \"if not condition\"\n statement, or a callable whose result should be a boolean.\n\n Returns:\n The wrapped function\n \"\"\"\n\n def real_skip_if(fn):\n\n def wrapper(*args, **kwargs):\n if callable(condition):\n skip = condition()\n else:\n skip = condition\n if not skip:\n return fn(*args, **kwargs)\n\n return wrapper\n\n return real_skip_if\n\n\[email protected]\ndef skip_if_error(test_obj, error_type, messages=None):\n \"\"\"Context manager to skip cases not considered failures by the tests.\n\n Note that this does not work if used in setUpClass/tearDownClass.\n Usage in setUp/tearDown works fine just like regular test methods.\n\n Args:\n test_obj: A test object provided as `self` in the test methods; this object\n is usually an instance of `unittest.TestCase`'s subclass and should have\n `skipTest` method.\n error_type: The error type to skip. Note that if `messages` are given, both\n `error_type` and `messages` need to match for the test to be skipped.\n messages: Optional, a string or list of strings. If `None`, the test will be\n skipped if `error_type` matches what is raised; otherwise, the test is\n skipped if any of the `messages` is contained in the message of the error\n raised, and `error_type` matches the error raised.\n\n Yields:\n Nothing.\n \"\"\"\n if messages:\n messages = nest.flatten(messages)\n try:\n yield\n except error_type as e:\n if not messages or any(message in str(e) for message in messages):\n test_obj.skipTest(\"Skipping error: {}: {}\".format(type(e), str(e)))\n else:\n raise\n\n\ndef enable_c_shapes(fn):\n \"\"\"No-op. TODO(b/74620627): Remove this.\"\"\"\n return fn\n\n\ndef with_c_shapes(cls):\n \"\"\"No-op. 
TODO(b/74620627): Remove this.\"\"\"\n return cls\n\n\ndef enable_control_flow_v2(fn):\n \"\"\"Decorator for enabling CondV2 and WhileV2 on a test.\n\n Note this enables using CondV2 and WhileV2 after running the test class's\n setup/teardown methods.\n\n In addition to this, callers must import the while_v2 module in order to set\n the _while_v2 module in control_flow_ops.\n\n Args:\n fn: the function to be wrapped\n\n Returns:\n The wrapped function\n \"\"\"\n\n def wrapper(*args, **kwargs):\n enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2\n control_flow_util.ENABLE_CONTROL_FLOW_V2 = True\n try:\n return fn(*args, **kwargs)\n finally:\n control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old\n\n return wrapper\n\n\ndef with_control_flow_v2(cls):\n \"\"\"Adds methods that call original methods with WhileV2 and CondV2 enabled.\n\n Note this enables CondV2 and WhileV2 in new methods after running the test\n class's setup method.\n\n In addition to this, callers must import the while_v2 module in order to set\n the _while_v2 module in control_flow_ops.\n\n If a test function has _disable_control_flow_v2 attr set to True (using the\n @disable_control_flow_v2 decorator), the v2 function is not generated for it.\n\n Example:\n\n @test_util.with_control_flow_v2\n class ControlFlowTest(test.TestCase):\n\n def testEnabledForV2(self):\n ...\n\n @test_util.disable_control_flow_v2(\"b/xyzabc\")\n def testDisabledForV2(self):\n ...\n\n Generated class:\n class ControlFlowTest(test.TestCase):\n\n def testEnabledForV2(self):\n ...\n\n def testEnabledForV2WithControlFlowV2(self):\n // Enable V2 flags.\n testEnabledForV2(self)\n // Restore V2 flags.\n\n def testDisabledForV2(self):\n ...\n\n Args:\n cls: class to decorate\n\n Returns:\n cls with new test methods added\n \"\"\"\n if control_flow_util.ENABLE_CONTROL_FLOW_V2:\n return cls\n\n for name, value in cls.__dict__.copy().items():\n if (callable(value) and\n name.startswith(unittest.TestLoader.testMethodPrefix) and\n not getattr(value, \"_disable_control_flow_v2\", False)):\n setattr(cls, name + \"WithControlFlowV2\", enable_control_flow_v2(value))\n return cls\n\n\ndef disable_control_flow_v2(unused_msg):\n \"\"\"Decorator for a function in a with_control_flow_v2 enabled test class.\n\n Blocks the function from being run with v2 control flow ops.\n\n Args:\n unused_msg: Reason for disabling.\n\n Returns:\n The wrapped function with _disable_control_flow_v2 attr set to True.\n \"\"\"\n\n def wrapper(func):\n func._disable_control_flow_v2 = True\n return func\n\n return wrapper\n\n\ndef enable_output_all_intermediates(fn):\n \"\"\"Force-enable outputing all intermediates from functional control flow ops.\n\n Args:\n fn: the function to be wrapped\n\n Returns:\n The wrapped function\n \"\"\"\n\n def wrapper(*args, **kwargs):\n output_all_intermediates_old = \\\n control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE\n control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True\n try:\n return fn(*args, **kwargs)\n finally:\n control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \\\n output_all_intermediates_old\n\n return wrapper\n\n\ndef assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):\n \"\"\"Decorator for asserting that no new Python objects persist after a test.\n\n Runs the test multiple times executing eagerly, first as a warmup and then to\n let objects accumulate. 
The warmup helps ignore caches which do not grow as\n the test is run repeatedly.\n\n Useful for checking that there are no missing Py_DECREFs in the C exercised by\n a bit of Python.\n\n Args:\n func: The function to test.\n warmup_iters: The numer of warmup iterations, excluded from measuring.\n\n Returns:\n The wrapped function performing the test.\n \"\"\"\n\n def wrap_f(f):\n def decorator(self, *args, **kwargs):\n \"\"\"Warms up, gets object counts, runs the test, checks for new objects.\"\"\"\n with context.eager_mode():\n gc.disable()\n # Run the test 2 times as warmup, in an attempt to fill up caches, which\n # should not grow as the test is run repeatedly below.\n #\n # TODO(b/117156879): Running warmup twice is black magic; we have seen\n # tests that fail with 1 warmup run, and pass with 2, on various\n # versions of python2.7.x.\n for _ in range(warmup_iters):\n f(self, *args, **kwargs)\n # Since we aren't in the normal test lifecylce, we need to manually run\n # cleanups to clear out their object references.\n self.doCleanups()\n\n # Some objects are newly created by _get_object_count_by_type(). So\n # create and save as a dummy variable to include it as a baseline.\n obj_count_by_type = _get_object_count_by_type()\n gc.collect()\n # unittest.doCleanups adds to self._outcome with each unwound call.\n # These objects are retained across gc collections so we exclude them\n # from the object count calculation.\n obj_count_by_type = _get_object_count_by_type(\n exclude=gc.get_referents(self._outcome.errors,\n self._outcome.skipped))\n\n if ops.has_default_graph():\n collection_sizes_before = {\n collection: len(ops.get_collection(collection))\n for collection in ops.get_default_graph().collections\n }\n for _ in range(3):\n f(self, *args, **kwargs)\n # Since we aren't in the normal test lifecylce, we need to manually run\n # cleanups to clear out their object references.\n self.doCleanups()\n # Note that gc.get_objects misses anything that isn't subject to garbage\n # collection (C types). 
Collections are a common source of leaks, so we\n # test for collection sizes explicitly.\n if ops.has_default_graph():\n for collection_key in ops.get_default_graph().collections:\n collection = ops.get_collection(collection_key)\n size_before = collection_sizes_before.get(collection_key, 0)\n if len(collection) > size_before:\n raise AssertionError(\n (\"Collection %s increased in size from \"\n \"%d to %d (current items %s).\") %\n (collection_key, size_before, len(collection), collection))\n # Make sure our collection checks don't show up as leaked memory by\n # removing references to temporary variables.\n del collection\n del collection_key\n del size_before\n del collection_sizes_before\n gc.collect()\n\n # There should be no new Python objects hanging around.\n obj_count_by_type = (\n _get_object_count_by_type(\n exclude=gc.get_referents(self._outcome.errors,\n self._outcome.skipped)) -\n obj_count_by_type)\n # In some cases (specifically on MacOS), new_count is somehow\n # smaller than previous_count.\n # Using plain assert because not all classes using this decorator\n # have assertLessEqual\n assert not obj_count_by_type, (\n \"The following objects were newly created: %s\" %\n str(obj_count_by_type))\n gc.enable()\n return decorator\n\n if func is None:\n return wrap_f\n else:\n return wrap_f(func)\n\n\ndef assert_no_new_tensors(f):\n \"\"\"Decorator for asserting that no new Tensors persist after a test.\n\n Mainly useful for checking that code using the Python C API has correctly\n manipulated reference counts.\n\n Clears the caches that it knows about, runs the garbage collector, then checks\n that there are no Tensor or Tensor-like objects still around. This includes\n Tensors to which something still has a reference (e.g. from missing\n Py_DECREFs) and uncollectable cycles (i.e. 
Python reference cycles where one\n of the objects has __del__ defined).\n\n Args:\n f: The test case to run.\n\n Returns:\n The decorated test case.\n \"\"\"\n\n def decorator(self, **kwargs):\n \"\"\"Finds existing Tensors, runs the test, checks for new Tensors.\"\"\"\n\n def _is_tensorflow_object(obj):\n try:\n return isinstance(obj,\n (ops.Tensor, variables.Variable,\n tensor_shape.Dimension, tensor_shape.TensorShape))\n except (ReferenceError, AttributeError):\n # If the object no longer exists, we don't care about it.\n return False\n\n tensors_before = set(\n id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))\n outside_executed_eagerly = context.executing_eagerly()\n # Run the test in a new graph so that collections get cleared when it's\n # done, but inherit the graph key so optimizers behave.\n outside_graph_key = ops.get_default_graph()._graph_key\n with ops.Graph().as_default():\n ops.get_default_graph()._graph_key = outside_graph_key\n if outside_executed_eagerly:\n with context.eager_mode():\n result = f(self, **kwargs)\n else:\n result = f(self, **kwargs)\n # Make an effort to clear caches, which would otherwise look like leaked\n # Tensors.\n context.context()._clear_caches() # pylint: disable=protected-access\n gc.collect()\n tensors_after = [\n obj for obj in gc.get_objects()\n if _is_tensorflow_object(obj) and id(obj) not in tensors_before\n ]\n if tensors_after:\n raise AssertionError((\"%d Tensors not deallocated after test: %s\" % (\n len(tensors_after),\n str(tensors_after),\n )))\n return result\n\n return decorator\n\n\ndef _find_reference_cycle(objects, idx):\n\n def get_ignore_reason(obj, denylist):\n \"\"\"Tests whether an object should be omitted from the dependency graph.\"\"\"\n if len(denylist) > 100:\n return \"<depth limit>\"\n if tf_inspect.isframe(obj):\n if \"test_util.py\" in tf_inspect.getframeinfo(obj)[0]:\n return \"<test code>\"\n for b in denylist:\n if b is obj:\n return \"<test code>\"\n if obj is denylist:\n return \"<test code>\"\n return None\n\n # Note: this function is meant to help with diagnostics. Its output is purely\n # a human-readable representation, so you may freely modify it to suit your\n # needs.\n def describe(obj, denylist, leaves_only=False):\n \"\"\"Returns a custom human-readable summary of obj.\n\n Args:\n obj: the value to describe.\n denylist: same as denylist in get_ignore_reason.\n leaves_only: boolean flag used when calling describe recursively. 
Useful\n for summarizing collections.\n \"\"\"\n if get_ignore_reason(obj, denylist):\n return \"{}{}\".format(get_ignore_reason(obj, denylist), type(obj))\n if tf_inspect.isframe(obj):\n return \"frame: {}\".format(tf_inspect.getframeinfo(obj))\n elif tf_inspect.ismodule(obj):\n return \"module: {}\".format(obj.__name__)\n else:\n if leaves_only:\n return \"{}, {}\".format(type(obj), id(obj))\n elif isinstance(obj, list):\n return \"list({}): {}\".format(\n id(obj), [describe(e, denylist, leaves_only=True) for e in obj])\n elif isinstance(obj, tuple):\n return \"tuple({}): {}\".format(\n id(obj), [describe(e, denylist, leaves_only=True) for e in obj])\n elif isinstance(obj, dict):\n return \"dict({}): {} keys\".format(id(obj), len(obj.keys()))\n elif tf_inspect.isfunction(obj):\n return \"function({}) {}; globals ID: {}\".format(\n id(obj), obj.__name__, id(obj.__globals__))\n else:\n return \"{}, {}\".format(type(obj), id(obj))\n\n def build_ref_graph(obj, graph, reprs, denylist):\n \"\"\"Builds a reference graph as <referrer> -> <list of referents>.\n\n Args:\n obj: The object to start from. The graph will be built by recursively\n adding its referrers.\n graph: Dict holding the graph to be built. To avoid creating extra\n references, the graph holds object IDs rather than actual objects.\n reprs: Auxiliary structure that maps object IDs to their human-readable\n description.\n denylist: List of objects to ignore.\n \"\"\"\n referrers = gc.get_referrers(obj)\n denylist = denylist + (referrers,)\n\n obj_id = id(obj)\n for r in referrers:\n if get_ignore_reason(r, denylist) is None:\n r_id = id(r)\n if r_id not in graph:\n graph[r_id] = []\n if obj_id not in graph[r_id]:\n graph[r_id].append(obj_id)\n build_ref_graph(r, graph, reprs, denylist)\n reprs[r_id] = describe(r, denylist)\n\n def find_cycle(el, graph, reprs, path):\n \"\"\"Finds and prints a single cycle in the dependency graph.\"\"\"\n if el not in graph:\n return\n for r in graph[el]:\n if r in path:\n logging.error(\"Reference cycle sample:\")\n for p in path + (r,):\n logging.error(reprs.get(p, \"unknown object \" + str(p)))\n return True\n else:\n if find_cycle(r, graph, reprs, path + (r,)):\n return True\n return False\n\n obj = objects[idx]\n graph = {} # referrer ID -> object ID\n reprs = {} # object ID -> description\n build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,\n describe, build_ref_graph, find_cycle))\n for k in graph:\n if find_cycle(k, graph, reprs, ()):\n return True\n return False\n\n\ndef assert_no_garbage_created(f):\n \"\"\"Test method decorator to assert that no garbage has been created.\n\n Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters\n cannot be un-set (i.e. will disable garbage collection for any other unit\n tests in the same file/shard).\n\n Args:\n f: The function to decorate.\n\n Returns:\n The decorated function.\n \"\"\"\n\n def decorator(self, **kwargs):\n \"\"\"Sets DEBUG_SAVEALL, runs the test, and checks for new garbage.\"\"\"\n # Force-load `distribution_strategy_context` to prevent GC at\n # test time when using eager. 
Remove once b/117329403 is resolved.\n tape.distribution_strategy_context.get_strategy()\n\n gc.disable()\n previous_debug_flags = gc.get_debug()\n gc.set_debug(gc.DEBUG_SAVEALL)\n gc.collect()\n previous_garbage = len(gc.garbage)\n result = f(self, **kwargs)\n gc.collect()\n new_garbage = len(gc.garbage)\n if new_garbage > previous_garbage:\n logging.error(\n \"The decorated test created work for Python's garbage collector, \"\n \"likely due to a reference cycle. New objects in cycle(s):\")\n for i, obj in enumerate(gc.garbage[previous_garbage:]):\n try:\n logging.error(\"Object %d of %d\", i,\n len(gc.garbage) - previous_garbage)\n\n def _safe_object_str(obj):\n return \"<%s %d>\" % (obj.__class__.__name__, id(obj))\n\n logging.error(\" Object type: %s\", _safe_object_str(obj))\n logging.error(\n \" Referrer types: %s\", \", \".join(\n [_safe_object_str(ref) for ref in gc.get_referrers(obj)]))\n logging.error(\n \" Referent types: %s\", \", \".join(\n [_safe_object_str(ref) for ref in gc.get_referents(obj)]))\n logging.error(\" Object attribute names: %s\", dir(obj))\n logging.error(\" Object __str__:\")\n logging.error(obj)\n logging.error(\" Object __repr__:\")\n logging.error(repr(obj))\n except Exception: # pylint: disable=broad-except\n logging.error(\"(Exception while printing object)\")\n\n # When garbage is created, this call can help identify reference cycles,\n # which are typically the cause of such garbage.\n if new_garbage > previous_garbage:\n for i in range(previous_garbage, new_garbage):\n if _find_reference_cycle(gc.garbage, i):\n break\n\n # This will fail if any garbage has been created, typically because of a\n # reference cycle.\n self.assertEqual(previous_garbage, new_garbage)\n # TODO(allenl): Figure out why this debug flag reset doesn't work. It would\n # be nice to be able to decorate arbitrary tests in a large test suite and\n # not hold on to every object in other tests.\n gc.set_debug(previous_debug_flags)\n gc.enable()\n return result\n\n return decorator\n\n\ndef _combine_named_parameters(**kwargs):\n \"\"\"Generate combinations based on its keyword arguments.\n\n Two sets of returned combinations can be concatenated using +. Their product\n can be computed using `times()`.\n\n Args:\n **kwargs: keyword arguments of form `option=[possibilities, ...]` or\n `option=the_only_possibility`.\n\n Returns:\n a list of dictionaries for each combination. Keys in the dictionaries are\n the keyword argument names. Each key has one value - one of the\n corresponding keyword argument values.\n \"\"\"\n sort_by_key = lambda k: k[0]\n combinations = []\n for key, values in sorted(kwargs.items(), key=sort_by_key):\n if not isinstance(values, list):\n values = [values]\n combinations.append([(key, value) for value in values])\n\n return [OrderedDict(result) for result in itertools.product(*combinations)]\n\n\ndef generate_combinations_with_testcase_name(**kwargs):\n \"\"\"Generate combinations based on its keyword arguments using combine().\n\n This function calls combine() and appends a testcase name to the list of\n dictionaries returned. The 'testcase_name' key is a required for named\n parameterized tests.\n\n Args:\n **kwargs: keyword arguments of form `option=[possibilities, ...]` or\n `option=the_only_possibility`.\n\n Returns:\n a list of dictionaries for each combination. Keys in the dictionaries are\n the keyword argument names. 
Each key has one value - one of the\n corresponding keyword argument values.\n \"\"\"\n combinations = _combine_named_parameters(**kwargs)\n named_combinations = []\n for combination in combinations:\n assert isinstance(combination, OrderedDict)\n name = \"\".join([\n \"_{}_{}\".format(\"\".join(filter(str.isalnum, key)),\n \"\".join(filter(str.isalnum, str(value))))\n for key, value in combination.items()\n ])\n named_combinations.append(\n OrderedDict(\n list(combination.items()) +\n [(\"testcase_name\", \"_test{}\".format(name))]))\n\n return named_combinations\n\n\ndef run_all_in_graph_and_eager_modes(cls):\n \"\"\"Execute all test methods in the given class with and without eager.\"\"\"\n base_decorator = run_in_graph_and_eager_modes\n for name in dir(cls):\n if (not name.startswith(unittest.TestLoader.testMethodPrefix) or\n name.startswith(\"testSkipEager\") or\n name.startswith(\"test_skip_eager\") or\n name == \"test_session\"):\n continue\n value = getattr(cls, name, None)\n if callable(value):\n setattr(cls, name, base_decorator(value))\n return cls\n\n\ndef build_as_function_and_v1_graph(func=None):\n \"\"\"Run a test case in v1 graph mode and inside tf.function in eager mode.\n\n WARNING: This decorator can only be used in test cases that statically checks\n generated graph. Attempting to evaluate graph or function results via.\n session.run() or self.evaluate() will fail.\n\n WARNING: This decorator can only be used for test cases that inherit from\n absl.testing.parameterized.TestCase.\n\n Args:\n func: Test case function to be decorated.\n\n Returns:\n Decorated test case function.\n \"\"\"\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError(\n \"`run_in_graph_mode_and_function` only supports test methods.\")\n\n @parameterized.named_parameters((\"_v1_graph\", \"v1_graph\"),\n (\"_function\", \"function\"))\n @functools.wraps(f)\n def decorated(self, run_mode, *args, **kwargs):\n if run_mode == \"v1_graph\":\n with ops.Graph().as_default():\n f(self, *args, **kwargs)\n elif run_mode == \"function\":\n\n @def_function.function\n def function_in_eager():\n f(self, *args, **kwargs)\n\n # Create a new graph for the eagerly executed version of this test for\n # better isolation.\n graph_for_eager_test = ops.Graph()\n with graph_for_eager_test.as_default(), context.eager_mode():\n function_in_eager()\n ops.dismantle_graph(graph_for_eager_test)\n else:\n return ValueError(\"Unknown run mode %s\" % run_mode)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\ndef run_in_async_and_sync_mode(f):\n \"\"\"Execute the test in async mode and sync mode.\"\"\"\n\n @parameterized.named_parameters([(\"Async\", True), (\"\", False)])\n @functools.wraps(f)\n def decorator(self, async_mode, *args, **kwargs):\n if async_mode:\n with context.execution_mode(context.ASYNC):\n f(self, *args, **kwargs)\n else:\n with context.execution_mode(context.SYNC):\n f(self, *args, **kwargs)\n return decorator\n\n\ndef eager_lazy_remote_copy_on_and_off(f):\n \"\"\"Execute the test method w/o lazy tensor copy for function remote inputs.\"\"\"\n\n @parameterized.named_parameters([(\"WithLazyRemoteCopy\", True), (\"\", False)])\n @functools.wraps(f)\n def decorator(self, lazily_remote_copy, *args, **kwargs):\n if lazily_remote_copy:\n context.context().lazy_remote_inputs_copy = True\n else:\n context.context().lazy_remote_inputs_copy = False\n f(self, *args, **kwargs)\n\n return decorator\n\n\ndef run_in_graph_and_eager_modes(func=None,\n config=None,\n 
use_gpu=True,\n assert_no_eager_garbage=False):\n \"\"\"Execute the decorated test with and without enabling eager execution.\n\n This function returns a decorator intended to be applied to test methods in\n a `tf.test.TestCase` class. Doing so will cause the contents of the test\n method to be executed twice - once normally, and once with eager execution\n enabled. This allows unittests to confirm the equivalence between eager\n and graph execution (see `tf.compat.v1.enable_eager_execution`).\n\n For example, consider the following unittest:\n\n ```python\n class MyTests(tf.test.TestCase):\n\n @run_in_graph_and_eager_modes\n def test_foo(self):\n x = tf.constant([1, 2])\n y = tf.constant([3, 4])\n z = tf.add(x, y)\n self.assertAllEqual([4, 6], self.evaluate(z))\n\n if __name__ == \"__main__\":\n tf.test.main()\n ```\n\n This test validates that `tf.add()` has the same behavior when computed with\n eager execution enabled as it does when constructing a TensorFlow graph and\n executing the `z` tensor in a session.\n\n `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and\n `run_in_graph_and_eager_modes` are available decorators for different\n v1/v2/eager/graph combinations.\n\n\n Args:\n func: function to be annotated. If `func` is None, this method returns a\n decorator the can be applied to a function. If `func` is not None this\n returns the decorator applied to `func`.\n config: An optional config_pb2.ConfigProto to use to configure the session\n when executing graphs.\n use_gpu: If True, attempt to run as many operations as possible on GPU.\n assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage\n collector and asserts that no extra garbage has been created when running\n the test with eager execution enabled. This will fail if there are\n reference cycles (e.g. a = []; a.append(a)). Off by default because some\n tests may create garbage for legitimate reasons (e.g. they define a class\n which inherits from `object`), and because DEBUG_SAVEALL is sticky in some\n Python interpreters (meaning that tests which rely on objects being\n collected elsewhere in the unit test file will not work). Additionally,\n checks that nothing still has a reference to Tensors that the test\n allocated.\n\n Returns:\n Returns a decorator that will run the decorated test method twice:\n once by constructing and executing a graph in a session and once with\n eager execution enabled.\n \"\"\"\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError(\n \"`run_in_graph_and_eager_modes` only supports test methods. 
\"\n \"Did you mean to use `run_all_in_graph_and_eager_modes`?\")\n\n def decorated(self, *args, **kwargs):\n try:\n with context.graph_mode():\n with self.test_session(use_gpu=use_gpu, config=config):\n f(self, *args, **kwargs)\n except unittest.case.SkipTest:\n pass\n\n def run_eagerly(self, **kwargs):\n if not use_gpu:\n with ops.device(\"/device:CPU:0\"):\n f(self, *args, **kwargs)\n else:\n f(self, *args, **kwargs)\n\n if assert_no_eager_garbage:\n ops.reset_default_graph()\n run_eagerly = assert_no_new_tensors(\n assert_no_garbage_created(run_eagerly))\n\n # This decorator runs the wrapped test twice.\n # Reset the test environment between runs.\n self.tearDown()\n self._tempdir = None\n # Create a new graph for the eagerly executed version of this test for\n # better isolation.\n graph_for_eager_test = ops.Graph()\n with graph_for_eager_test.as_default(), context.eager_mode():\n self.setUp()\n run_eagerly(self, **kwargs)\n ops.dismantle_graph(graph_for_eager_test)\n\n return tf_decorator.make_decorator(f, decorated)\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\ndef py_func_if_in_function(f):\n\n def decorated(*args, **kwds):\n if not ops.inside_function():\n return f(*args, **kwds)\n\n tensor_args = []\n tensor_indices = []\n for i, arg in enumerate(args):\n if isinstance(arg, (ops.Tensor, variables.Variable)):\n tensor_args.append(arg)\n tensor_indices.append(i)\n\n def inner_f(*inner_tensor_args):\n my_args = list(args)\n for i, n in zip(tensor_indices, inner_tensor_args):\n my_args[i] = n\n return f(*my_args, **kwds)\n\n return script_ops.py_func(inner_f, tensor_args, [])\n\n return tf_decorator.make_decorator(f, decorated)\n\n\ndef also_run_as_tf_function(f):\n \"\"\"Runs the decorated test twice--once as is, once inside a tf.function.\n\n This allows you to run a test both in eager execution and inside a\n tf.function, exercising the two execution modes supported in tf 2.0. The test\n assertions are automatically done inside tf.py_funcs, and tf.function ensures\n that they run in the proper order and with the proper side effects.\n\n Currently variable creation is not supported in tests annotated with this\n decorator since it's tricky to ensure the variable doesn't get repeatedly\n created when retracing the tf.function.\n\n Args:\n f: the test method to be decorated\n\n Returns:\n The decorated test method, which will run both in eager and inside a\n tf.function.\n \"\"\"\n\n def decorated(*args, **kwds):\n\n def bound_f():\n f(*args, **kwds)\n\n with context.eager_mode():\n # Running in eager mode\n bound_f()\n # Running as TF function\n # TODO(b/121143941): Remove the autograph override.\n def_function.function(bound_f, autograph=False)()\n\n return decorated\n\n\ndef deprecated_graph_mode_only(func=None):\n \"\"\"Execute the decorated test in graph mode.\n\n This function returns a decorator intended to be applied to tests that are not\n compatible with eager mode. When this decorator is applied, the test body will\n be run in an environment where API calls construct graphs instead of executing\n eagerly.\n\n `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and\n `run_in_graph_and_eager_modes` are available decorators for different\n v1/v2/eager/graph combinations.\n\n Args:\n func: function to be annotated. If `func` is None, this method returns a\n decorator the can be applied to a function. 
If `func` is not None this\n returns the decorator applied to `func`.\n\n Returns:\n Returns a decorator that will run the decorated test method in graph mode.\n \"\"\"\n\n def decorator(f):\n if tf_inspect.isclass(f):\n setup = f.__dict__.get(\"setUp\")\n if setup is not None:\n setattr(f, \"setUp\", decorator(setup))\n\n for name, value in f.__dict__.copy().items():\n if (callable(value) and\n name.startswith(unittest.TestLoader.testMethodPrefix)):\n setattr(f, name, decorator(value))\n\n return f\n\n def decorated(self, *args, **kwargs):\n if context.executing_eagerly():\n with context.graph_mode():\n return f(self, *args, **kwargs)\n else:\n return f(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\nrun_deprecated_v1 = deprecated_graph_mode_only\n\n\ndef run_all_in_deprecated_graph_mode_only(cls):\n \"\"\"Execute all tests in a class in graph mode.\"\"\"\n base_decorator = deprecated_graph_mode_only\n for name in dir(cls):\n if (not name.startswith(unittest.TestLoader.testMethodPrefix) or\n name == \"test_session\"):\n continue\n value = getattr(cls, name, None)\n if callable(value):\n setattr(cls, name, base_decorator(value))\n return cls\n\n\ndef run_v1_only(reason, func=None):\n \"\"\"Execute the decorated test only if running in v1 mode.\n\n This function is intended to be applied to tests that exercise v1 only\n functionality. If the test is run in v2 mode it will simply be skipped.\n\n `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and\n `run_in_graph_and_eager_modes` are available decorators for different\n v1/v2/eager/graph combinations.\n\n Args:\n reason: string giving a reason for limiting the test to v1 only.\n func: function to be annotated. If `func` is None, this method returns a\n decorator the can be applied to a function. If `func` is not None this\n returns the decorator applied to `func`.\n\n Returns:\n Returns a decorator that will conditionally skip the decorated test method.\n \"\"\"\n if not isinstance(reason, str):\n raise ValueError(\"'reason' should be string, got {}\".format(type(reason)))\n\n def decorator(f):\n if tf_inspect.isclass(f):\n # To skip an entire test suite class, we only decorate the setUp method\n # to skip all tests. There are cases when setUp is not defined (not\n # overridden in subclasses of TestCase, so not available in f.__dict__\n # below). For those cases, we walk the method resolution order list and\n # pick the first setUp method we find (usually this should be the one in\n # the parent class since that's the TestCase class).\n for cls in type.mro(f):\n setup = cls.__dict__.get(\"setUp\")\n if setup is not None:\n setattr(f, \"setUp\", decorator(setup))\n break\n\n return f\n else:\n # If f is just a function, just create a decorator for it and return it\n def decorated(self, *args, **kwargs):\n if tf2.enabled():\n self.skipTest(reason)\n\n return f(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\ndef run_v2_only(func=None):\n \"\"\"Execute the decorated test only if running in v2 mode.\n\n This function is intended to be applied to tests that exercise v2 only\n functionality. If the test is run in v1 mode it will simply be skipped.\n\n `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and\n `run_in_graph_and_eager_modes` are available decorators for different\n v1/v2/eager/graph combinations.\n\n Args:\n func: function to be annotated. 
If `func` is None, this method returns a\n decorator the can be applied to a function. If `func` is not None this\n returns the decorator applied to `func`.\n\n Returns:\n Returns a decorator that will conditionally skip the decorated test method.\n \"\"\"\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError(\"`run_v2_only` only supports test methods.\")\n\n def decorated(self, *args, **kwargs):\n if not tf2.enabled():\n self.skipTest(\"Test is only compatible with v2\")\n\n return f(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\ndef run_gpu_only(func=None):\n \"\"\"Execute the decorated test only if a GPU is available.\n\n This function is intended to be applied to tests that require the presence\n of a GPU. If a GPU is absent, it will simply be skipped.\n\n Args:\n func: function to be annotated. If `func` is None, this method returns a\n decorator the can be applied to a function. If `func` is not None this\n returns the decorator applied to `func`.\n\n Returns:\n Returns a decorator that will conditionally skip the decorated test method.\n \"\"\"\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError(\"`run_gpu_only` only supports test methods.\")\n\n def decorated(self, *args, **kwargs):\n if not is_gpu_available():\n self.skipTest(\"Test requires GPU\")\n\n return f(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\ndef run_cuda_only(func=None):\n \"\"\"Execute the decorated test only if a GPU is available.\n\n This function is intended to be applied to tests that require the presence\n of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.\n\n Args:\n func: function to be annotated. If `func` is None, this method returns a\n decorator the can be applied to a function. If `func` is not None this\n returns the decorator applied to `func`.\n\n Returns:\n Returns a decorator that will conditionally skip the decorated test method.\n \"\"\"\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError(\"`run_cuda_only` only supports test methods.\")\n\n def decorated(self, *args, **kwargs):\n if not is_gpu_available(cuda_only=True):\n self.skipTest(\"Test requires CUDA GPU\")\n\n return f(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\ndef with_forward_compatibility_horizons(*horizons):\n \"\"\"Executes the decorated test with the specified forward-compat horizons.\n\n Args:\n *horizons: A list of (year, month, day) tuples. 
If the list includes\n `None`, then the test will also be run with no forward-compatibility\n horizon set.\n\n Returns:\n A decorator that will execute the test with the specified horizons.\n \"\"\"\n if not horizons:\n raise ValueError(\"Expected at least one horizon.\")\n for horizon in horizons:\n if not ((horizon is None) or\n (len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):\n raise ValueError(\"Bad horizon value: %r\" % horizon)\n\n def decorator(f):\n if tf_inspect.isclass(f):\n raise ValueError(\"`with_forward_compatibility_horizons` only \"\n \"supports test methods.\")\n def decorated(self, *args, **kwargs):\n for horizon in horizons:\n if horizon is None:\n f(self, *args, **kwargs)\n else:\n (year, month, day) = horizon\n with forward_compatibility_horizon(year, month, day):\n f(self, *args, **kwargs)\n return decorated\n\n return decorator\n\n\[email protected](None,\n \"Use `tf.config.list_physical_devices('GPU')` instead.\")\n@tf_export(\"test.is_gpu_available\")\ndef is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):\n \"\"\"Returns whether TensorFlow can access a GPU.\n\n Warning: if a non-GPU version of the package is installed, the function would\n also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow\n was build with CUDA support.\n\n For example,\n >>> gpu_available = tf.test.is_gpu_available()\n >>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)\n >>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))\n\n Args:\n cuda_only: limit the search to CUDA GPUs.\n min_cuda_compute_capability: a (major,minor) pair that indicates the minimum\n CUDA compute capability required, or None if no requirement.\n\n Note that the keyword arg name \"cuda_only\" is misleading (since routine will\n return true when a GPU device is available irrespective of whether TF was\n built with CUDA support or ROCm support. However no changes here because\n\n ++ Changing the name \"cuda_only\" to something more generic would break\n backward compatibility\n\n ++ Adding an equivalent \"rocm_only\" would require the implementation check\n the build type. 
This in turn would require doing the same for CUDA and thus\n potentially break backward compatibility\n\n ++ Adding a new \"cuda_or_rocm_only\" would not break backward compatibility,\n but would require most (if not all) callers to update the call to use\n \"cuda_or_rocm_only\" instead of \"cuda_only\"\n\n Returns:\n True if a GPU device of the requested kind is available.\n \"\"\"\n\n # This was needed earlier when we had support for SYCL in TensorFlow.\n del cuda_only\n\n try:\n for local_device in device_lib.list_local_devices():\n if local_device.device_type == \"GPU\":\n gpu_info = gpu_util.compute_capability_from_device_desc(local_device)\n cc = gpu_info.compute_capability or (0, 0)\n if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:\n return True\n return False\n except errors_impl.NotFoundError as e:\n if not all(x in str(e) for x in [\"CUDA\", \"not find\"]):\n raise e\n else:\n logging.error(str(e))\n return False\n\n\[email protected]\ndef device(use_gpu):\n \"\"\"Uses gpu when requested and available.\"\"\"\n if use_gpu and is_gpu_available():\n dev = \"/device:GPU:0\"\n else:\n dev = \"/device:CPU:0\"\n with ops.device(dev):\n yield\n\n\[email protected]\ndef use_gpu():\n \"\"\"Uses gpu when requested and available.\"\"\"\n with device(use_gpu=True):\n yield\n\n\[email protected]\ndef force_gpu():\n \"\"\"Force the gpu to be used.\"\"\"\n with ops.device(\"/device:GPU:0\"):\n yield\n\n\[email protected]\ndef force_cpu():\n \"\"\"Force the cpu to be used.\"\"\"\n with ops.device(\"/device:CPU:0\"):\n yield\n\n\nclass CapturedWrites(object):\n \"\"\"A utility class to load the captured writes made to a stream.\"\"\"\n\n def __init__(self, capture_location):\n self.capture_location = capture_location\n\n def contents(self):\n \"\"\"Get the captured writes as a single string.\"\"\"\n with open(self.capture_location) as tmp_file:\n output_data = \"\".join(tmp_file.readlines())\n return output_data\n\n\nclass FakeEagerSession(object):\n \"\"\"Fake session so tests that conditionally use placeholders can use eager.\n\n There are a number of tests that conditionally use placeholders for shape\n inference. 
The pattern is demonstrated here:\n\n ```python\n with self.cached_session() as sess:\n if static_shape:\n y = math_ops.matmul(x, ...)\n feed_dict = {}\n else:\n x_ph = array_ops.placeholder(...)\n y = math_ops.matmul(x_ph, ...)\n feed_dict = {x_ph: x}\n val = sess.run(y, feed_dict=feed_dict)\n ```\n\n Since the feed_dict is empty when not using placeholders we should be able to\n call self.evaluate(), however this requires rewriting the test case.\n This class should be considered a stop-gap solution to get tests running with\n eager with minimal changes to the actual test.\n \"\"\"\n\n def __init__(self, test_case):\n self._test_case = test_case\n\n def run(self, fetches, *args, **kwargs):\n \"\"\"Evaluate `fetches`.\n\n Fail if additional args are specified.\n\n Args:\n fetches: A Tensor or a nested list/tuple of Tensors.\n *args: Positional arguments\n **kwargs: Keyword arguments\n\n Raises:\n RuntimeError: If args or kwargs are specified.\n\n Returns:\n Tensors as numpy values.\n \"\"\"\n feed_dict = kwargs.pop(\"feed_dict\", {})\n if feed_dict:\n raise RuntimeError(\n \"feed_dict is not supported when eager execution is enabled \"\n \"(in this case, sess.run(t) is shorthand for t.numpy()\")\n\n if args or kwargs:\n raise RuntimeError(\n \"Optional args are not supported when eager execution is enabled \"\n \"(in this case, sess.run(t) is shorthand for t.numpy()\")\n\n return self._test_case.evaluate(fetches)\n\n\nclass ErrorLoggingSession(session.Session):\n \"\"\"Wrapper around a Session that logs errors in run().\"\"\"\n\n def run(self, *args, **kwargs):\n try:\n return super(ErrorLoggingSession, self).run(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n # Note: disable the logging for OutOfRangeError, which makes the output\n # of tf.data tests hard to read, because OutOfRangeError is used as the\n # signal completion\n if not isinstance(e, errors.OutOfRangeError):\n logging.error(str(e))\n raise\n\n\ndef disable_cudnn_autotune(func):\n \"\"\"Disable autotuning during the call to this function.\n\n Some tests want to base assertions on a graph being isomorphic with a copy.\n To ensure this, this decorator disables autotuning.\n\n Args:\n func: Function to run with CuDNN autotuning turned off.\n\n Returns:\n Decorated function.\n \"\"\"\n\n def decorator(f):\n\n def decorated(self, *args, **kwargs):\n original_tf_cudnn_use_autotune = os.environ.get(\"TF_CUDNN_USE_AUTOTUNE\")\n os.environ[\"TF_CUDNN_USE_AUTOTUNE\"] = \"false\"\n original_xla_flags = os.environ.get(\"XLA_FLAGS\")\n new_xla_flags = \"--xla_gpu_autotune_level=0\"\n if original_xla_flags:\n new_xla_flags = original_xla_flags + \" \" + new_xla_flags\n os.environ[\"XLA_FLAGS\"] = new_xla_flags\n\n result = f(self, *args, **kwargs)\n\n if (original_tf_cudnn_use_autotune is None):\n del os.environ[\"TF_CUDNN_USE_AUTOTUNE\"]\n else:\n os.environ[\"TF_CUDNN_USE_AUTOTUNE\"] = original_tf_cudnn_use_autotune\n if (original_xla_flags is None):\n del os.environ[\"XLA_FLAGS\"]\n else:\n os.environ[\"XLA_FLAGS\"] = original_xla_flags\n\n return result\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n\n# The description is just for documentation purposes.\ndef enable_tf_xla_constant_folding(description):\n\n if not isinstance(description, str):\n raise ValueError(\"'description' should be string, got {}\".format(\n type(description)))\n\n def enable_tf_xla_constant_folding_impl(func):\n \"\"\"Enable constant folding during the call to this function.\n\n Some 
tests fail without constant folding.\n\n Args:\n func: Function to run with constant folding turned on.\n\n Returns:\n Decorated function.\n \"\"\"\n\n def decorator(f):\n\n def decorated(self, *args, **kwargs):\n original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()\n pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)\n result = f(self, *args, **kwargs)\n pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)\n return result\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n return enable_tf_xla_constant_folding_impl\n\n\n# Updates test function by selectively disabling it.\ndef _disable_test(execute_func):\n\n def disable_test_impl(func):\n\n def decorator(func):\n\n def decorated(self, *args, **kwargs):\n if execute_func:\n return func(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n return disable_test_impl\n\n\n# The description is just for documentation purposes.\ndef disable_xla(description): # pylint: disable=unused-argument\n \"\"\"Execute the test method only if xla is not enabled.\"\"\"\n execute_func = not is_xla_enabled()\n return _disable_test(execute_func)\n\n\n# The description is just for documentation purposes.\ndef disable_mlir_bridge(description): # pylint: disable=unused-argument\n \"\"\"Execute the test method only if MLIR bridge is not enabled.\"\"\"\n execute_func = not is_mlir_bridge_enabled()\n return _disable_test(execute_func)\n\n\n# The description is just for documentation purposes.\ndef disable_tfrt(unused_description):\n\n def disable_tfrt_impl(cls_or_func):\n \"\"\"Execute the test only if tfrt is not enabled.\"\"\"\n\n if tf_inspect.isclass(cls_or_func):\n if tfrt_utils.enabled():\n return None\n else:\n return cls_or_func\n else:\n def decorator(func):\n\n def decorated(self, *args, **kwargs):\n if tfrt_utils.enabled():\n return\n else:\n return func(self, *args, **kwargs)\n\n return decorated\n\n if cls_or_func is not None:\n return decorator(cls_or_func)\n\n return decorator\n\n return disable_tfrt_impl\n\n\ndef for_all_test_methods(decorator, *args, **kwargs):\n \"\"\"Generate class-level decorator from given method-level decorator.\n\n It is expected for the given decorator to take some arguments and return\n a method that is then called on the test method to produce a decorated\n method.\n\n Args:\n decorator: The decorator to apply.\n *args: Positional arguments\n **kwargs: Keyword arguments\n Returns: Function that will decorate a given classes test methods with the\n decorator.\n \"\"\"\n\n def all_test_methods_impl(cls):\n \"\"\"Apply decorator to all test methods in class.\"\"\"\n for name in dir(cls):\n value = getattr(cls, name)\n if callable(value) and name.startswith(\n \"test\") and (name != \"test_session\"):\n setattr(cls, name, decorator(*args, **kwargs)(value))\n return cls\n\n return all_test_methods_impl\n\n\n# The description is just for documentation purposes.\ndef no_xla_auto_jit(description): # pylint: disable=unused-argument\n \"\"\"This test is not intended to be run with XLA auto jit enabled.\"\"\"\n execute_func = not is_xla_enabled()\n return _disable_test(execute_func)\n\n\n# The description is just for documentation purposes.\ndef xla_allow_fallback(description): # pylint: disable=unused-argument\n\n def xla_allow_fallback_impl(func):\n \"\"\"Allow fallback to TF even though testing xla.\"\"\"\n\n def decorator(func):\n\n def decorated(self, *args, **kwargs):\n if 
is_xla_enabled():\n # Update the global XLABuildOpsPassFlags to enable lazy compilation,\n # which allows the compiler to fall back to TF classic. Remember the\n # old value so that we can reset it.\n old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)\n result = func(self, *args, **kwargs)\n pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)\n return result\n else:\n return func(self, *args, **kwargs)\n\n return decorated\n\n if func is not None:\n return decorator(func)\n\n return decorator\n\n return xla_allow_fallback_impl\n\n\n# The description is just for documentation purposes.\ndef run_without_tensor_float_32(description): # pylint: disable=unused-argument\n \"\"\"Execute test with TensorFloat-32 disabled.\n\n While almost every real-world deep learning model runs fine with\n TensorFloat-32, many tests use assertAllClose or similar methods.\n TensorFloat-32 matmuls typically will cause such methods to fail with the\n default tolerances.\n\n Args:\n description: A description used for documentation purposes, describing why\n the test requires TensorFloat-32 to be disabled.\n\n Returns:\n Decorator which runs a test with TensorFloat-32 disabled.\n \"\"\"\n\n def decorator(f):\n\n @functools.wraps(f)\n def decorated(self, *args, **kwargs):\n allowed = config.tensor_float_32_execution_enabled()\n try:\n config.enable_tensor_float_32_execution(False)\n f(self, *args, **kwargs)\n finally:\n config.enable_tensor_float_32_execution(allowed)\n\n return decorated\n\n return decorator\n\n\n# The description is just for documentation purposes.\ndef run_all_without_tensor_float_32(description): # pylint: disable=unused-argument\n \"\"\"Execute all tests in a class with TensorFloat-32 disabled.\"\"\"\n return for_all_test_methods(run_without_tensor_float_32, description)\n\n\ndef matmul_without_tf32(a, b, *args, **kwargs):\n \"\"\"Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.\n\n This effectively runs matmul without TensorFloat-32. It should only be used in\n tests when verifying some other op or functions works correctly, e.g. to test\n `tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. 
In\n such cases, the matmul itself is not being tested so it's OK to run it with\n higher precision.\n\n If a matmul itself is being tested, or some other op which uses matmul, use\n `run_without_tensor_float_32` instead.\n\n This also casts complex64 inputs to complex128, since TensorFloat-32 can also\n be used with complex64\n\n Args:\n a: First input to tf.linalg.matmul\n b: Second input to tf.linalg.matmul\n args: Other positional arguments to tf.linalg.matmul\n **kwargs: Other keyword arguments to tf.linalg.matmul\n\n Returns:\n A tensor with the same type as `a`.\n \"\"\"\n if config.tensor_float_32_execution_enabled() and a.dtype == \"float32\":\n a = math_ops.cast(a, \"float64\")\n b = math_ops.cast(b, \"float64\")\n ret = math_ops.matmul(a, b, *args, **kwargs)\n return math_ops.cast(ret, a.dtype)\n elif config.tensor_float_32_execution_enabled() and a.dtype == \"complex64\":\n a = math_ops.cast(a, \"complex128\")\n b = math_ops.cast(b, \"complex128\")\n ret = math_ops.matmul(a, b, *args, **kwargs)\n return math_ops.cast(ret, a.dtype)\n else:\n return math_ops.matmul(a, b, *args, **kwargs)\n\n\nclass EagerSessionWarner(object):\n\n def __getattr__(self, attr):\n raise AttributeError(\n \"Trying to access properties or call methods on the result of \"\n \"self.session(), self.cached_session(), etc while eager execution \"\n \"is enabled. If you're porting this test case to TF 2.0, either \"\n \"adapt the test to work with eager execution or insert a call to \"\n \"tf.disable_eager_execution() in the main() function of this test \"\n \"file.\")\n\n\n@tf_export(\"test.TestCase\")\nclass TensorFlowTestCase(googletest.TestCase):\n \"\"\"Base class for tests that need to test TensorFlow.\"\"\"\n\n def __init__(self, methodName=\"runTest\"): # pylint: disable=invalid-name\n super(TensorFlowTestCase, self).__init__(methodName)\n if is_xla_enabled():\n pywrap_tf_session.TF_SetXlaAutoJitMode(\"2\")\n pywrap_tf_session.TF_SetXlaMinClusterSize(1)\n pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)\n pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)\n # Constant folding secretly runs code on TF:Classic CPU, so we also\n # disable it here.\n pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)\n\n # Check if the mlir bridge has been explicitly enabled or disabled. If\n # is_mlir_bridge_enabled() returns None, the user did not explictly enable\n # or disable the bridge so do not update enable_mlir_bridge.\n if is_mlir_bridge_enabled():\n context.context().enable_mlir_bridge = True\n elif is_mlir_bridge_enabled() is not None:\n context.context().enable_mlir_bridge = False\n\n self._threads = []\n self._tempdir = None\n self._cached_session = None\n self._test_start_time = None\n # This flag provides the ability to control whether the graph mode gets\n # initialized for TF1 or not. Initializing for TF1, which is what was\n # happening earlier, was preventing enablement of 'eager mode' in the test.\n self._set_default_seed = True\n\n def setUp(self):\n super(TensorFlowTestCase, self).setUp()\n self._ClearCachedSession()\n random.seed(random_seed.DEFAULT_GRAPH_SEED)\n np.random.seed(random_seed.DEFAULT_GRAPH_SEED)\n # Note: The following line is necessary because some test methods may error\n # out from within nested graph contexts (e.g., via assertRaises and\n # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty\n # under certain versions of Python. 
That would cause\n # ops.reset_default_graph() to throw an exception if the stack were not\n # cleared first.\n ops._default_graph_stack.reset() # pylint: disable=protected-access\n ops.reset_default_graph()\n if self._set_default_seed:\n random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)\n # Reset summary writer in case another test used set_as_default() with their\n # summary writer.\n summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access\n summary_state.writer = None\n\n # Avoiding calling setUp() for the poorly named test_session method.\n if self.id().endswith(\".test_session\"):\n self.skipTest(\"Not a test.\")\n\n self._test_start_time = time.time()\n\n def tearDown(self):\n # If a subclass overrides setUp and doesn't call the parent class's setUp,\n # then we may not have set the start time.\n if self._test_start_time is not None:\n logging.info(\"time(%s): %ss\", self.id(),\n round(time.time() - self._test_start_time, 2))\n\n for thread in self._threads:\n thread.check_termination()\n\n self._ClearCachedSession()\n super(TensorFlowTestCase, self).tearDown()\n\n def _ClearCachedSession(self):\n if self._cached_session is not None:\n self._cached_session.close()\n self._cached_session = None\n\n def get_temp_dir(self):\n \"\"\"Returns a unique temporary directory for the test to use.\n\n If you call this method multiple times during in a test, it will return the\n same folder. However, across different runs the directories will be\n different. This will ensure that across different runs tests will not be\n able to pollute each others environment.\n If you need multiple unique directories within a single test, you should\n use tempfile.mkdtemp as follows:\n tempfile.mkdtemp(dir=self.get_temp_dir()):\n\n Returns:\n string, the path to the unique temporary directory created for this test.\n \"\"\"\n if not self._tempdir:\n self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())\n return self._tempdir\n\n @contextlib.contextmanager\n def captureWritesToStream(self, stream):\n \"\"\"A context manager that captures the writes to a given stream.\n\n This context manager captures all writes to a given stream inside of a\n `CapturedWrites` object. When this context manager is created, it yields\n the `CapturedWrites` object. The captured contents can be accessed by\n calling `.contents()` on the `CapturedWrites`.\n\n For this function to work, the stream must have a file descriptor that\n can be modified using `os.dup` and `os.dup2`, and the stream must support\n a `.flush()` method. The default python sys.stdout and sys.stderr are\n examples of this. Note that this does not work in Colab or Jupyter\n notebooks, because those use alternate stdout streams.\n\n Example:\n ```python\n class MyOperatorTest(test_util.TensorFlowTestCase):\n def testMyOperator(self):\n input = [1.0, 2.0, 3.0, 4.0, 5.0]\n with self.captureWritesToStream(sys.stdout) as captured:\n result = MyOperator(input).eval()\n self.assertStartsWith(captured.contents(), \"This was printed.\")\n ```\n\n Args:\n stream: The stream whose writes should be captured. 
This stream must have\n a file descriptor, support writing via using that file descriptor, and\n must have a `.flush()` method.\n\n Yields:\n A `CapturedWrites` object that contains all writes to the specified stream\n made during this context.\n \"\"\"\n stream.flush()\n fd = stream.fileno()\n tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())\n tmp_file = open(tmp_file_path, \"w\")\n orig_fd = os.dup(fd)\n os.dup2(tmp_file.fileno(), fd)\n try:\n yield CapturedWrites(tmp_file_path)\n finally:\n tmp_file.close()\n os.dup2(orig_fd, fd)\n\n def _AssertProtoEquals(self, a, b, msg=None):\n \"\"\"Asserts that a and b are the same proto.\n\n Uses ProtoEq() first, as it returns correct results\n for floating point attributes, and then use assertProtoEqual()\n in case of failure as it provides good error messages.\n\n Args:\n a: a proto.\n b: another proto.\n msg: Optional message to report on failure.\n \"\"\"\n if not compare.ProtoEq(a, b):\n compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)\n\n def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):\n \"\"\"Asserts that message is same as parsed expected_message_ascii.\n\n Creates another prototype of message, reads the ascii message into it and\n then compares them using self._AssertProtoEqual().\n\n Args:\n expected_message_maybe_ascii: proto message in original or ascii form.\n message: the message to validate.\n msg: Optional message to report on failure.\n \"\"\"\n if isinstance(expected_message_maybe_ascii, type(message)):\n expected_message = expected_message_maybe_ascii\n self._AssertProtoEquals(expected_message, message, msg=msg)\n elif isinstance(expected_message_maybe_ascii, (str, bytes)):\n expected_message = type(message)()\n text_format.Merge(\n expected_message_maybe_ascii,\n expected_message,\n descriptor_pool=descriptor_pool.Default())\n self._AssertProtoEquals(expected_message, message, msg=msg)\n else:\n assert False, (\"Can't compare protos of type %s and %s.\" %\n (type(expected_message_maybe_ascii), type(message)))\n\n def assertProtoEqualsVersion(\n self,\n expected,\n actual,\n producer=versions.GRAPH_DEF_VERSION,\n min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,\n msg=None):\n expected = \"versions { producer: %d min_consumer: %d };\\n%s\" % (\n producer, min_consumer, expected)\n self.assertProtoEquals(expected, actual, msg=msg)\n\n def assertStartsWith(self, actual, expected_start, msg=None):\n \"\"\"Assert that actual.startswith(expected_start) is True.\n\n Args:\n actual: str\n expected_start: str\n msg: Optional message to report on failure.\n \"\"\"\n if not actual.startswith(expected_start):\n fail_msg = \"%r does not start with %r\" % (actual, expected_start)\n fail_msg += \" : %r\" % (msg) if msg else \"\"\n self.fail(fail_msg)\n\n def _eval_tensor(self, tensor):\n if tensor is None:\n return None\n elif callable(tensor):\n return self._eval_helper(tensor())\n else:\n try:\n if sparse_tensor.is_sparse(tensor):\n return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),\n tensor.values.numpy(),\n tensor.dense_shape.numpy())\n elif ragged_tensor.is_ragged(tensor):\n return ragged_tensor_value.RaggedTensorValue(\n self._eval_tensor(tensor.values),\n self._eval_tensor(tensor.row_splits))\n elif isinstance(tensor, ops.IndexedSlices):\n return ops.IndexedSlicesValue(\n values=tensor.values.numpy(),\n indices=tensor.indices.numpy(),\n dense_shape=tensor.dense_shape.numpy())\n # Convert tensors and composite tensors to numpy arrays.\n return 
nest.map_structure(lambda t: t.numpy(), tensor,\n expand_composites=True)\n except AttributeError as e:\n six.raise_from(ValueError(\"Unsupported type %s.\" % type(tensor)), e)\n\n def _eval_helper(self, tensors):\n if tensors is None:\n return None\n return nest.map_structure(self._eval_tensor, tensors)\n\n def evaluate(self, tensors):\n \"\"\"Evaluates tensors and returns numpy values.\n\n Args:\n tensors: A Tensor or a nested list/tuple of Tensors.\n\n Returns:\n tensors numpy values.\n \"\"\"\n if context.executing_eagerly():\n return self._eval_helper(tensors)\n else:\n sess = ops.get_default_session()\n if sess is None:\n with self.test_session() as sess:\n return sess.run(tensors)\n else:\n return sess.run(tensors)\n\n # pylint: disable=g-doc-return-or-yield\n @contextlib.contextmanager\n def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):\n \"\"\"A context manager for a TensorFlow Session for use in executing tests.\n\n Note that this will set this session and the graph as global defaults.\n\n Use the `use_gpu` and `force_gpu` options to control where ops are run. If\n `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if\n `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as\n possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to\n the CPU.\n\n Example:\n\n ``` python\n class MyOperatorTest(test_util.TensorFlowTestCase):\n def testMyOperator(self):\n with self.session(use_gpu=True):\n valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]\n result = MyOperator(valid_input).eval()\n self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]\n invalid_input = [-1.0, 2.0, 7.0]\n with self.assertRaisesOpError(\"negative input not supported\"):\n MyOperator(invalid_input).eval()\n ```\n\n Args:\n graph: Optional graph to use during the returned session.\n config: An optional config_pb2.ConfigProto to use to configure the\n session.\n use_gpu: If True, attempt to run as many ops as possible on GPU.\n force_gpu: If True, pin all ops to `/device:GPU:0`.\n\n Yields:\n A Session object that should be used as a context manager to surround\n the graph building and execution code in a test case.\n \"\"\"\n if context.executing_eagerly():\n yield EagerSessionWarner()\n else:\n with self._create_session(graph, config, force_gpu) as sess:\n with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):\n yield sess\n\n @contextlib.contextmanager\n def cached_session(self,\n graph=None,\n config=None,\n use_gpu=False,\n force_gpu=False):\n \"\"\"Returns a TensorFlow Session for use in executing tests.\n\n This method behaves differently than self.session(): for performance reasons\n `cached_session` will by default reuse the same session within the same\n test. The session returned by this function will only be closed at the end\n of the test (in the TearDown function).\n\n Use the `use_gpu` and `force_gpu` options to control where ops are run. If\n `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if\n `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as\n possible. 
If both `force_gpu and `use_gpu` are False, all ops are pinned to\n the CPU.\n\n Example:\n ```python\n class MyOperatorTest(test_util.TensorFlowTestCase):\n def testMyOperator(self):\n with self.cached_session(use_gpu=True) as sess:\n valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]\n result = MyOperator(valid_input).eval()\n self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]\n invalid_input = [-1.0, 2.0, 7.0]\n with self.assertRaisesOpError(\"negative input not supported\"):\n MyOperator(invalid_input).eval()\n ```\n\n Args:\n graph: Optional graph to use during the returned session.\n config: An optional config_pb2.ConfigProto to use to configure the\n session.\n use_gpu: If True, attempt to run as many ops as possible on GPU.\n force_gpu: If True, pin all ops to `/device:GPU:0`.\n\n Yields:\n A Session object that should be used as a context manager to surround\n the graph building and execution code in a test case.\n \"\"\"\n if context.executing_eagerly():\n yield FakeEagerSession(self)\n else:\n sess = self._get_cached_session(\n graph, config, force_gpu, crash_if_inconsistent_args=True)\n with self._constrain_devices_and_set_default(sess, use_gpu,\n force_gpu) as cached:\n yield cached\n\n @contextlib.contextmanager\n @deprecation.deprecated(None, \"Use `self.session()` or \"\n \"`self.cached_session()` instead.\")\n def test_session(self,\n graph=None,\n config=None,\n use_gpu=False,\n force_gpu=False):\n \"\"\"Use cached_session instead.\"\"\"\n if self.id().endswith(\".test_session\"):\n self.skipTest(\n \"Tests that have the name \\\"test_session\\\" are automatically skipped \"\n \"by TensorFlow test fixture, as the name is reserved for creating \"\n \"sessions within tests. Please rename your test if you have a test \"\n \"with this name.\")\n if context.executing_eagerly():\n yield None\n else:\n if graph is None:\n sess = self._get_cached_session(\n graph, config, force_gpu, crash_if_inconsistent_args=False)\n with self._constrain_devices_and_set_default(sess, use_gpu,\n force_gpu) as cached:\n yield cached\n else:\n with self.session(graph, config, use_gpu, force_gpu) as sess:\n yield sess\n\n # pylint: enable=g-doc-return-or-yield\n\n class _CheckedThread(object):\n \"\"\"A wrapper class for Thread that asserts successful completion.\n\n This class should be created using the TensorFlowTestCase.checkedThread()\n method.\n \"\"\"\n\n def __init__(self, testcase, target, args=None, kwargs=None):\n \"\"\"Constructs a new instance of _CheckedThread.\n\n Args:\n testcase: The TensorFlowTestCase for which this thread is being created.\n target: A callable object representing the code to be executed in the\n thread.\n args: A tuple of positional arguments that will be passed to target.\n kwargs: A dictionary of keyword arguments that will be passed to target.\n \"\"\"\n self._testcase = testcase\n self._target = target\n self._args = () if args is None else args\n self._kwargs = {} if kwargs is None else kwargs\n self._thread = threading.Thread(target=self._protected_run)\n self._exception = None\n\n self._is_thread_joined = False\n\n def _protected_run(self):\n \"\"\"Target for the wrapper thread. Sets self._exception on failure.\"\"\"\n try:\n self._target(*self._args, **self._kwargs)\n except Exception as e: # pylint: disable=broad-except\n self._exception = e\n\n def start(self):\n \"\"\"Starts the thread's activity.\n\n This must be called at most once per _CheckedThread object. 
It arranges\n for the object's target to be invoked in a separate thread of control.\n \"\"\"\n self._thread.start()\n\n def join(self):\n \"\"\"Blocks until the thread terminates.\n\n Raises:\n self._testcase.failureException: If the thread terminates with due to\n an exception.\n \"\"\"\n self._is_thread_joined = True\n self._thread.join()\n if self._exception is not None:\n self._testcase.fail(\"Error in checkedThread: %s\" % str(self._exception))\n\n def is_alive(self):\n \"\"\"Returns whether the thread is alive.\n\n This method returns True just before the run() method starts\n until just after the run() method terminates.\n\n Returns:\n True if the thread is alive, otherwise False.\n \"\"\"\n return self._thread.is_alive()\n\n def check_termination(self):\n \"\"\"Returns whether the checked thread was properly used and did terminate.\n\n Every checked thread should be \"join\"ed after starting, and before the\n test tears down. If it is not joined, it is possible the thread will hang\n and cause flaky failures in tests.\n\n Raises:\n self._testcase.failureException: If check_termination was called before\n thread was joined.\n\n RuntimeError: If the thread is not terminated. This means thread was not\n joined with the main thread.\n \"\"\"\n if self._is_thread_joined:\n if self.is_alive():\n raise RuntimeError(\n \"Thread was not joined with main thread, and is still running \"\n \"when the test finished.\")\n else:\n self._testcase.fail(\"A checked thread was not joined.\")\n\n def checkedThread(self, target, args=None, kwargs=None):\n \"\"\"Returns a Thread wrapper that asserts 'target' completes successfully.\n\n This method should be used to create all threads in test cases, as\n otherwise there is a risk that a thread will silently fail, and/or\n assertions made in the thread will not be respected.\n\n Args:\n target: A callable object to be executed in the thread.\n args: The argument tuple for the target invocation. Defaults to ().\n kwargs: A dictionary of keyword arguments for the target invocation.\n Defaults to {}.\n\n Returns:\n A wrapper for threading.Thread that supports start() and join() methods.\n \"\"\"\n ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)\n self._threads.append(ret)\n return ret\n\n # pylint: enable=invalid-name\n @py_func_if_in_function\n def assertNear(self, f1, f2, err, msg=None):\n \"\"\"Asserts that two floats are near each other.\n\n Checks that |f1 - f2| < err and asserts a test failure\n if not.\n\n Args:\n f1: A float value.\n f2: A float value.\n err: A float value.\n msg: An optional string message to append to the failure message.\n \"\"\"\n # f1 == f2 is needed here as we might have: f1, f2 = inf, inf\n self.assertTrue(\n f1 == f2 or math.fabs(f1 - f2) <= err, \"%f != %f +/- %f%s\" %\n (f1, f2, err, \" (%s)\" % msg if msg is not None else \"\"))\n\n @py_func_if_in_function\n def assertArrayNear(self, farray1, farray2, err, msg=None):\n \"\"\"Asserts that two float arrays are near each other.\n\n Checks that for all elements of farray1 and farray2\n |f1 - f2| < err. 
Asserts a test failure if not.\n\n Args:\n farray1: a list of float values.\n farray2: a list of float values.\n err: a float value.\n msg: Optional message to report on failure.\n \"\"\"\n self.assertEqual(len(farray1), len(farray2), msg=msg)\n for f1, f2 in zip(farray1, farray2):\n self.assertNear(float(f1), float(f2), err, msg=msg)\n\n def _NDArrayNear(self, ndarray1, ndarray2, err):\n return np.linalg.norm(ndarray1 - ndarray2) < err\n\n @py_func_if_in_function\n def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):\n \"\"\"Asserts that two numpy arrays have near values.\n\n Args:\n ndarray1: a numpy ndarray.\n ndarray2: a numpy ndarray.\n err: a float. The maximum absolute difference allowed.\n msg: Optional message to report on failure.\n \"\"\"\n self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)\n\n def _GetNdArray(self, a):\n # If a is tensor-like then convert it to ndarray\n if tensor_util.is_tensor(a):\n if isinstance(a, ops._EagerTensorBase):\n a = a.numpy()\n else:\n a = self.evaluate(a)\n if not isinstance(a, np.ndarray):\n return np.array(a)\n return a\n\n def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):\n a = self._GetNdArray(a)\n b = self._GetNdArray(b)\n # When the array rank is small, print its contents. Numpy array printing is\n # implemented using inefficient recursion so prints can cause tests to\n # time out.\n if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):\n shape_mismatch_msg = (\"Shape mismatch: expected %s, got %s with contents \"\n \"%s.\") % (a.shape, b.shape, b)\n else:\n shape_mismatch_msg = \"Shape mismatch: expected %s, got %s.\" % (a.shape,\n b.shape)\n self.assertEqual(a.shape, b.shape, shape_mismatch_msg)\n\n msgs = [msg]\n # np.allclose does not always work for our custom bfloat16 extension type\n # when type promotions are involved, so we first cast any bfloat16 arrays\n # to float32.\n a_dtype = a.dtype\n a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a\n b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b\n if not np.allclose(a, b, rtol=rtol, atol=atol):\n # Adds more details to np.testing.assert_allclose.\n #\n # NOTE: numpy.allclose (and numpy.testing.assert_allclose)\n # checks whether two arrays are element-wise equal within a\n # tolerance. The relative difference (rtol * abs(b)) and the\n # absolute difference atol are added together to compare against\n # the absolute difference between a and b. 
Here, we want to\n # tell user which elements violate such conditions.\n cond = np.logical_or(\n np.abs(a - b) > atol + rtol * np.abs(b),\n np.isnan(a) != np.isnan(b))\n if a.ndim:\n x = a[np.where(cond)]\n y = b[np.where(cond)]\n msgs.append(\"not close where = {}\".format(np.where(cond)))\n else:\n # np.where is broken for scalars\n x, y = a, b\n msgs.append(\"not close lhs = {}\".format(x))\n msgs.append(\"not close rhs = {}\".format(y))\n msgs.append(\"not close dif = {}\".format(np.abs(x - y)))\n msgs.append(\"not close tol = {}\".format(atol + rtol * np.abs(y)))\n msgs.append(\"dtype = {}, shape = {}\".format(a_dtype, a.shape))\n # TODO(xpan): There seems to be a bug:\n # tensorflow/compiler/tests:binary_ops_test pass with float32\n # nan even though the equal_nan is False by default internally.\n np.testing.assert_allclose(\n a, b, rtol=rtol, atol=atol, err_msg=\"\\n\".join(msgs), equal_nan=True)\n\n def _assertAllCloseRecursive(self,\n a,\n b,\n rtol=1e-6,\n atol=1e-6,\n path=None,\n msg=None):\n path = path or []\n path_str = ((\"[\" + \"][\".join(str(p) for p in path) + \"]\") if path else \"\")\n msg = msg if msg else \"\"\n\n # Check if a and/or b are namedtuples.\n if hasattr(a, \"_asdict\"):\n a = a._asdict()\n if hasattr(b, \"_asdict\"):\n b = b._asdict()\n a_is_dict = isinstance(a, collections_abc.Mapping)\n if a_is_dict != isinstance(b, collections_abc.Mapping):\n raise ValueError(\"Can't compare dict to non-dict, a%s vs b%s. %s\" %\n (path_str, path_str, msg))\n if a_is_dict:\n self.assertItemsEqual(\n a.keys(),\n b.keys(),\n msg=\"mismatched keys: a%s has keys %s, but b%s has keys %s. %s\" %\n (path_str, a.keys(), path_str, b.keys(), msg))\n for k in a:\n path.append(k)\n self._assertAllCloseRecursive(\n a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)\n del path[-1]\n elif isinstance(a, (list, tuple)):\n # Try to directly compare a, b as ndarrays; if not work, then traverse\n # through the sequence, which is more expensive.\n try:\n a_as_ndarray = self._GetNdArray(a)\n b_as_ndarray = self._GetNdArray(b)\n self._assertArrayLikeAllClose(\n a_as_ndarray,\n b_as_ndarray,\n rtol=rtol,\n atol=atol,\n msg=\"Mismatched value: a%s is different from b%s. %s\" %\n (path_str, path_str, msg))\n except (ValueError, TypeError) as e:\n if len(a) != len(b):\n raise ValueError(\n \"Mismatched length: a%s has %d items, but b%s has %d items. %s\" %\n (path_str, len(a), path_str, len(b), msg))\n for idx, (a_ele, b_ele) in enumerate(zip(a, b)):\n path.append(str(idx))\n self._assertAllCloseRecursive(\n a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)\n del path[-1]\n # a and b are ndarray like objects\n else:\n try:\n self._assertArrayLikeAllClose(\n a,\n b,\n rtol=rtol,\n atol=atol,\n msg=(\"Mismatched value: a%s is different from b%s. %s\" %\n (path_str, path_str, msg)))\n except TypeError as e:\n msg = (\"Error: a%s has %s, but b%s has %s. %s\" %\n (path_str, type(a), path_str, type(b), msg))\n e.args = ((e.args[0] + \" : \" + msg,) + e.args[1:])\n raise\n\n @py_func_if_in_function\n def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):\n \"\"\"Asserts that two structures of numpy arrays or Tensors, have near values.\n\n `a` and `b` can be arbitrarily nested structures. A layer of a nested\n structure can be a `dict`, `namedtuple`, `tuple` or `list`.\n\n Note: the implementation follows\n [`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)\n (and numpy.testing.assert_allclose). 
It checks whether two arrays are\n element-wise equal within a tolerance. The relative difference\n (`rtol * abs(b)`) and the absolute difference `atol` are added together\n to compare against the absolute difference between `a` and `b`.\n\n Args:\n a: The expected numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor), or any arbitrarily nested of\n structure of these.\n b: The actual numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor), or any arbitrarily nested of\n structure of these.\n rtol: relative tolerance.\n atol: absolute tolerance.\n msg: Optional message to report on failure.\n\n Raises:\n ValueError: if only one of `a[p]` and `b[p]` is a dict or\n `a[p]` and `b[p]` have different length, where `[p]` denotes a path\n to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and\n `[p] = [1]['d']`, then `a[p] = (6, 7)`.\n \"\"\"\n if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):\n return self._assertRaggedClose(a, b, rtol, atol, msg)\n self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)\n\n @py_func_if_in_function\n def assertAllCloseAccordingToType(self,\n a,\n b,\n rtol=1e-6,\n atol=1e-6,\n float_rtol=1e-6,\n float_atol=1e-6,\n half_rtol=1e-3,\n half_atol=1e-3,\n bfloat16_rtol=1e-2,\n bfloat16_atol=1e-2,\n msg=None):\n \"\"\"Like assertAllClose, but also suitable for comparing fp16 arrays.\n\n In particular, the tolerance is reduced to 1e-3 if at least\n one of the arguments is of type float16.\n\n Args:\n a: the expected numpy ndarray or anything can be converted to one.\n b: the actual numpy ndarray or anything can be converted to one.\n rtol: relative tolerance.\n atol: absolute tolerance.\n float_rtol: relative tolerance for float32.\n float_atol: absolute tolerance for float32.\n half_rtol: relative tolerance for float16.\n half_atol: absolute tolerance for float16.\n bfloat16_rtol: relative tolerance for bfloat16.\n bfloat16_atol: absolute tolerance for bfloat16.\n msg: Optional message to report on failure.\n \"\"\"\n a = self._GetNdArray(a)\n b = self._GetNdArray(b)\n # types with lower tol are put later to overwrite previous ones.\n if (a.dtype == np.float32 or b.dtype == np.float32 or\n a.dtype == np.complex64 or b.dtype == np.complex64):\n rtol = max(rtol, float_rtol)\n atol = max(atol, float_atol)\n if a.dtype == np.float16 or b.dtype == np.float16:\n rtol = max(rtol, half_rtol)\n atol = max(atol, half_atol)\n if (a.dtype == dtypes.bfloat16.as_numpy_dtype or\n b.dtype == dtypes.bfloat16.as_numpy_dtype):\n rtol = max(rtol, bfloat16_rtol)\n atol = max(atol, bfloat16_atol)\n\n self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)\n\n @py_func_if_in_function\n def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):\n \"\"\"Assert that two numpy arrays, or Tensors, do not have near values.\n\n Args:\n a: The expected numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor), or any arbitrarily nested of\n structure of these.\n b: The actual numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor), or any arbitrarily nested of\n structure of these.\n rtol: relative tolerance.\n atol: absolute tolerance.\n msg: Optional message to report on failure.\n\n Raises:\n AssertionError: If `a` and `b` are unexpectedly close at all elements.\n \"\"\"\n try:\n self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)\n except AssertionError:\n return\n msg = msg or \"\"\n raise 
AssertionError(\"The two values are close at all elements. %s\" % msg)\n\n @py_func_if_in_function\n def assertAllEqual(self, a, b, msg=None):\n \"\"\"Asserts that two numpy arrays or Tensors have the same values.\n\n Args:\n a: the expected numpy ndarray or anything can be converted to one.\n b: the actual numpy ndarray or anything can be converted to one.\n msg: Optional message to report on failure.\n \"\"\"\n if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):\n return self._assertRaggedEqual(a, b, msg)\n msg = msg if msg else \"\"\n a = self._GetNdArray(a)\n b = self._GetNdArray(b)\n # Arbitrary bounds so that we don't print giant tensors.\n if (b.ndim <= 3 or b.size < 500):\n self.assertEqual(\n a.shape, b.shape, \"Shape mismatch: expected %s, got %s.\"\n \" Contents: %r. \\n%s.\" % (a.shape, b.shape, b, msg))\n else:\n self.assertEqual(\n a.shape, b.shape, \"Shape mismatch: expected %s, got %s.\"\n \" %s\" % (a.shape, b.shape, msg))\n\n same = (a == b)\n\n if (a.dtype in [\n np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype\n ]):\n same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))\n msgs = [msg]\n if not np.all(same):\n # Adds more details to np.testing.assert_array_equal.\n diff = np.logical_not(same)\n if a.ndim:\n x = a[np.where(diff)]\n y = b[np.where(diff)]\n msgs.append(\"not equal where = {}\".format(np.where(diff)))\n else:\n # np.where is broken for scalars\n x, y = a, b\n msgs.append(\"not equal lhs = %r\" % x)\n msgs.append(\"not equal rhs = %r\" % y)\n\n # Handle mixed string types as a result of PY2to3 migration. That is, the\n # mixing between bytes (b-prefix strings, PY2 default) and unicodes\n # (u-prefix strings, PY3 default).\n if six.PY3:\n if (a.dtype.kind != b.dtype.kind and\n {a.dtype.kind, b.dtype.kind}.issubset({\"U\", \"S\", \"O\"})):\n a_list = []\n b_list = []\n # OK to flatten `a` and `b` because they are guaranteed to have the\n # same shape.\n for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:\n for item in flat_arr:\n if isinstance(item, str):\n out_list.append(item.encode(\"utf-8\"))\n else:\n out_list.append(item)\n a = np.array(a_list)\n b = np.array(b_list)\n\n np.testing.assert_array_equal(a, b, err_msg=\"\\n\".join(msgs))\n\n @py_func_if_in_function\n def assertNotAllEqual(self, a, b, msg=None):\n \"\"\"Asserts that two numpy arrays or Tensors do not have the same values.\n\n Args:\n a: the expected numpy ndarray or anything can be converted to one.\n b: the actual numpy ndarray or anything can be converted to one.\n msg: Optional message to report on failure.\n \"\"\"\n try:\n self.assertAllEqual(a, b)\n except AssertionError:\n return\n raise AssertionError(\"The two values are equal at all elements. 
%s\" % msg)\n\n @py_func_if_in_function\n def assertAllGreater(self, a, comparison_target):\n \"\"\"Assert element values are all greater than a target value.\n\n Args:\n a: The numpy `ndarray`, or anything that can be converted into a numpy\n `ndarray` (including Tensor).\n comparison_target: The target value of comparison.\n \"\"\"\n a = self._GetNdArray(a)\n self.assertGreater(np.min(a), comparison_target)\n\n @py_func_if_in_function\n def assertAllLess(self, a, comparison_target):\n \"\"\"Assert element values are all less than a target value.\n\n Args:\n a: The numpy `ndarray`, or anything that can be converted into a numpy\n `ndarray` (including Tensor).\n comparison_target: The target value of comparison.\n \"\"\"\n a = self._GetNdArray(a)\n self.assertLess(np.max(a), comparison_target)\n\n @py_func_if_in_function\n def assertAllGreaterEqual(self, a, comparison_target):\n \"\"\"Assert element values are all greater than or equal to a target value.\n\n Args:\n a: The numpy `ndarray`, or anything that can be converted into a numpy\n `ndarray` (including Tensor).\n comparison_target: The target value of comparison.\n \"\"\"\n a = self._GetNdArray(a)\n self.assertGreaterEqual(np.min(a), comparison_target)\n\n @py_func_if_in_function\n def assertAllLessEqual(self, a, comparison_target):\n \"\"\"Assert element values are all less than or equal to a target value.\n\n Args:\n a: The numpy `ndarray`, or anything that can be converted into a numpy\n `ndarray` (including Tensor).\n comparison_target: The target value of comparison.\n \"\"\"\n a = self._GetNdArray(a)\n self.assertLessEqual(np.max(a), comparison_target)\n\n def _format_subscripts(self, subscripts, value, limit=10, indent=2):\n \"\"\"Generate a summary of ndarray subscripts as a list of str.\n\n If limit == N, this method will print up to the first N subscripts on\n separate\n lines. A line of ellipses (...) will be appended at the end if the number of\n subscripts exceeds N.\n\n Args:\n subscripts: The tensor (np.ndarray) subscripts, of the same format as\n np.where()'s return value, i.e., a tuple of arrays with each array\n corresponding to a dimension. 
E.g., (array([1, 1]), array([0, 1])).\n value: (np.ndarray) value of the tensor.\n limit: (int) The maximum number of indices to print.\n indent: (int) Number of characters to indent at the beginning of each\n line.\n\n Returns:\n (list of str) the multi-line representation of the subscripts and values,\n potentially with omission at the end.\n \"\"\"\n lines = []\n subscripts = np.transpose(subscripts)\n prefix = \" \" * indent\n if np.ndim(value) == 0:\n return [prefix + \"[0] : \" + str(value)]\n for subscript in itertools.islice(subscripts, limit):\n lines.append(prefix + str(subscript) + \" : \" +\n str(value[tuple(subscript)]))\n if len(subscripts) > limit:\n lines.append(prefix + \"...\")\n return lines\n\n @py_func_if_in_function\n def assertAllInRange(self,\n target,\n lower_bound,\n upper_bound,\n open_lower_bound=False,\n open_upper_bound=False):\n \"\"\"Assert that elements in a Tensor are all in a given range.\n\n Args:\n target: The numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor).\n lower_bound: lower bound of the range\n upper_bound: upper bound of the range\n open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather\n than the default >=)\n open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather\n than the default <=)\n\n Raises:\n AssertionError:\n if the value tensor does not have an ordered numeric type (float* or\n int*), or\n if there are nan values, or\n if any of the elements do not fall in the specified range.\n \"\"\"\n target = self._GetNdArray(target)\n if not (np.issubdtype(target.dtype, np.floating) or\n np.issubdtype(target.dtype, np.integer)):\n raise AssertionError(\n \"The value of %s does not have an ordered numeric type, instead it \"\n \"has type: %s\" % (target, target.dtype))\n\n nan_subscripts = np.where(np.isnan(target))\n if np.size(nan_subscripts):\n raise AssertionError(\n \"%d of the %d element(s) are NaN. \"\n \"Subscripts(s) and value(s) of the NaN element(s):\\n\" %\n (len(nan_subscripts[0]), np.size(target)) +\n \"\\n\".join(self._format_subscripts(nan_subscripts, target)))\n\n range_str = ((\"(\" if open_lower_bound else \"[\") + str(lower_bound) + \", \" +\n str(upper_bound) + (\")\" if open_upper_bound else \"]\"))\n\n violations = (\n np.less_equal(target, lower_bound) if open_lower_bound else np.less(\n target, lower_bound))\n violations = np.logical_or(\n violations,\n np.greater_equal(target, upper_bound)\n if open_upper_bound else np.greater(target, upper_bound))\n violation_subscripts = np.where(violations)\n if np.size(violation_subscripts):\n raise AssertionError(\n \"%d of the %d element(s) are outside the range %s. 
\" %\n (len(violation_subscripts[0]), np.size(target), range_str) +\n \"Subscript(s) and value(s) of the offending elements:\\n\" +\n \"\\n\".join(self._format_subscripts(violation_subscripts, target)))\n\n @py_func_if_in_function\n def assertAllInSet(self, target, expected_set):\n \"\"\"Assert that elements of a Tensor are all in a given closed set.\n\n Args:\n target: The numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor).\n expected_set: (`list`, `tuple` or `set`) The closed set that the elements\n of the value of `target` are expected to fall into.\n\n Raises:\n AssertionError:\n if any of the elements do not fall into `expected_set`.\n \"\"\"\n target = self._GetNdArray(target)\n\n # Elements in target that are not in expected_set.\n diff = np.setdiff1d(target.flatten(), list(expected_set))\n if np.size(diff):\n raise AssertionError(\"%d unique element(s) are not in the set %s: %s\" %\n (np.size(diff), expected_set, diff))\n\n @py_func_if_in_function\n def assertDTypeEqual(self, target, expected_dtype):\n \"\"\"Assert ndarray data type is equal to expected.\n\n Args:\n target: The numpy `ndarray`, or anything that can be converted into a\n numpy `ndarray` (including Tensor).\n expected_dtype: Expected data type.\n \"\"\"\n target = self._GetNdArray(target)\n if not isinstance(target, list):\n arrays = [target]\n for arr in arrays:\n self.assertEqual(arr.dtype, expected_dtype)\n\n # pylint: disable=g-doc-return-or-yield\n @contextlib.contextmanager\n def assertRaisesWithPredicateMatch(self, exception_type,\n expected_err_re_or_predicate):\n \"\"\"Returns a context manager to enclose code expected to raise an exception.\n\n If the exception is an OpError, the op stack is also included in the message\n predicate search.\n\n Args:\n exception_type: The expected type of exception that should be raised.\n expected_err_re_or_predicate: If this is callable, it should be a function\n of one argument that inspects the passed-in exception and returns True\n (success) or False (please fail the test). 
Otherwise, the error message\n is expected to match this regular expression partially.\n\n Returns:\n A context manager to surround code that is expected to raise an\n exception.\n \"\"\"\n if callable(expected_err_re_or_predicate):\n predicate = expected_err_re_or_predicate\n else:\n\n def predicate(e):\n err_str = e.message if isinstance(e, errors.OpError) else str(e)\n op = e.op if isinstance(e, errors.OpError) else None\n while op is not None:\n err_str += \"\\nCaused by: \" + op.name\n op = op._original_op # pylint: disable=protected-access\n logging.info(\"Searching within error strings: '%s' within '%s'\",\n expected_err_re_or_predicate, err_str)\n return re.search(expected_err_re_or_predicate, err_str)\n\n try:\n yield\n self.fail(exception_type.__name__ + \" not raised\")\n except Exception as e: # pylint: disable=broad-except\n if not isinstance(e, exception_type) or not predicate(e):\n raise AssertionError(\"Exception of type %s: %s\" %\n (str(type(e)), str(e)))\n\n # pylint: enable=g-doc-return-or-yield\n\n def assertRaisesOpError(self, expected_err_re_or_predicate):\n return self.assertRaisesWithPredicateMatch(errors.OpError,\n expected_err_re_or_predicate)\n\n def assertShapeEqual(self, np_array, tf_tensor, msg=None):\n \"\"\"Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.\n\n Args:\n np_array: A Numpy ndarray or Numpy scalar.\n tf_tensor: A Tensor.\n msg: Optional message to report on failure.\n\n Raises:\n TypeError: If the arguments have the wrong type.\n \"\"\"\n if not isinstance(np_array, (np.ndarray, np.generic)):\n raise TypeError(\"np_array must be a Numpy ndarray or Numpy scalar\")\n if not isinstance(tf_tensor, ops.Tensor):\n raise TypeError(\"tf_tensor must be a Tensor\")\n self.assertAllEqual(\n np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)\n\n def assertDeviceEqual(self, device1, device2, msg=None):\n \"\"\"Asserts that the two given devices are the same.\n\n Args:\n device1: A string device name or TensorFlow `DeviceSpec` object.\n device2: A string device name or TensorFlow `DeviceSpec` object.\n msg: Optional message to report on failure.\n \"\"\"\n device1 = pydev.canonical_name(device1)\n device2 = pydev.canonical_name(device2)\n self.assertEqual(\n device1, device2,\n \"Devices %s and %s are not equal. 
%s\" % (device1, device2, msg))\n\n def _GetPyList(self, a):\n \"\"\"Converts `a` to a nested python list.\"\"\"\n if isinstance(a, ragged_tensor.RaggedTensor):\n return self.evaluate(a).to_list()\n elif isinstance(a, ops.Tensor):\n a = self.evaluate(a)\n return a.tolist() if isinstance(a, np.ndarray) else a\n elif isinstance(a, np.ndarray):\n return a.tolist()\n elif isinstance(a, ragged_tensor_value.RaggedTensorValue):\n return a.to_list()\n else:\n return np.array(a).tolist()\n\n def _assertRaggedEqual(self, a, b, msg):\n \"\"\"Asserts that two ragged tensors are equal.\"\"\"\n a_list = self._GetPyList(a)\n b_list = self._GetPyList(b)\n self.assertEqual(a_list, b_list, msg)\n\n if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):\n a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0\n b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0\n self.assertEqual(a_ragged_rank, b_ragged_rank, msg)\n\n def _assertRaggedClose(self, a, b, rtol, atol, msg=None):\n a_list = self._GetPyList(a)\n b_list = self._GetPyList(b)\n self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)\n\n if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):\n a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0\n b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0\n self.assertEqual(a_ragged_rank, b_ragged_rank, msg)\n\n def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path=\"value\"):\n self.assertEqual(type(a), type(b))\n if isinstance(a, (list, tuple)):\n self.assertLen(a, len(b), \"Length differs for %s\" % path)\n for i in range(len(a)):\n self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,\n \"%s[%s]\" % (path, i))\n else:\n self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)\n\n # Fix Python 3+ compatibility issues\n if not six.PY2:\n # pylint: disable=invalid-name\n\n # Silence a deprecation warning\n assertRaisesRegexp = googletest.TestCase.assertRaisesRegex\n\n # assertItemsEqual is assertCountEqual as of 3.2.\n assertItemsEqual = googletest.TestCase.assertCountEqual\n\n # pylint: enable=invalid-name\n\n @contextlib.contextmanager\n def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):\n \"\"\"Set the session and its graph to global default and constrain devices.\"\"\"\n if context.executing_eagerly():\n yield None\n else:\n with sess.graph.as_default(), sess.as_default():\n if force_gpu:\n # Use the name of an actual device if one is detected, or\n # '/device:GPU:0' otherwise\n gpu_name = gpu_device_name()\n if not gpu_name:\n gpu_name = \"/device:GPU:0\"\n with sess.graph.device(gpu_name):\n yield sess\n elif use_gpu:\n yield sess\n else:\n with sess.graph.device(\"/device:CPU:0\"):\n yield sess\n\n def _create_session(self, graph, config, force_gpu):\n \"\"\"See session() for details.\"\"\"\n\n def prepare_config(config):\n \"\"\"Returns a config for sessions.\n\n Args:\n config: An optional config_pb2.ConfigProto to use to configure the\n session.\n\n Returns:\n A config_pb2.ConfigProto object.\n \"\"\"\n # TODO(b/114333779): Enforce allow_soft_placement=False when\n # use_gpu=False. 
Currently many tests rely on the fact that any device\n # will be used even when a specific device is supposed to be used.\n allow_soft_placement = not force_gpu\n if config is None:\n config = context.context().config\n config.allow_soft_placement = allow_soft_placement\n elif not allow_soft_placement and config.allow_soft_placement:\n config_copy = context.context().config\n config = config_copy\n config.allow_soft_placement = False\n # Don't perform optimizations for tests so we don't inadvertently run\n # gpu ops on cpu\n config.graph_options.optimizer_options.opt_level = -1\n # Disable Grappler constant folding since some tests & benchmarks\n # use constant input and become meaningless after constant folding.\n # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE\n # GRAPPLER TEAM.\n config.graph_options.rewrite_options.constant_folding = (\n rewriter_config_pb2.RewriterConfig.OFF)\n config.graph_options.rewrite_options.pin_to_host_optimization = (\n rewriter_config_pb2.RewriterConfig.OFF)\n return config\n\n return ErrorLoggingSession(graph=graph, config=prepare_config(config))\n\n def _get_cached_session(self,\n graph=None,\n config=None,\n force_gpu=False,\n crash_if_inconsistent_args=True):\n \"\"\"See cached_session() for documentation.\"\"\"\n if self._cached_session is None:\n sess = self._create_session(\n graph=graph, config=config, force_gpu=force_gpu)\n self._cached_session = sess\n self._cached_graph = graph\n self._cached_config = config\n self._cached_force_gpu = force_gpu\n return sess\n else:\n if crash_if_inconsistent_args and self._cached_graph is not graph:\n raise ValueError(\"The graph used to get the cached session is \"\n \"different than the one that was used to create the \"\n \"session. Maybe create a new session with \"\n \"self.session()\")\n if crash_if_inconsistent_args and self._cached_config is not config:\n raise ValueError(\"The config used to get the cached session is \"\n \"different than the one that was used to create the \"\n \"session. Maybe create a new session with \"\n \"self.session()\")\n if crash_if_inconsistent_args and (self._cached_force_gpu is\n not force_gpu):\n raise ValueError(\n \"The force_gpu value used to get the cached session is \"\n \"different than the one that was used to create the \"\n \"session. Maybe create a new session with \"\n \"self.session()\")\n return self._cached_session\n\n\n@tf_export(\"test.create_local_cluster\")\ndef create_local_cluster(num_workers,\n num_ps,\n protocol=\"grpc\",\n worker_config=None,\n ps_config=None):\n \"\"\"Create and start local servers and return the associated `Server` objects.\n\n \"PS\" stands for \"parameter server\": a task responsible for storing and\n updating the model's parameters. Other tasks send updates to these parameters\n as they work on optimizing the parameters. 
This particular division of labor\n between tasks is not required, but is common for distributed training.\n\n Read more at https://www.tensorflow.org/guide/extend/architecture\n\n \n\n\n Figure illustrates the interaction of these components.\n \"/job:worker/task:0\" and \"/job:ps/task:0\" are both tasks with worker services.\n\n\n Example:\n ```python\n workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)\n\n worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]\n\n with tf.device(\"/job:ps/task:0\"):\n ...\n with tf.device(\"/job:ps/task:1\"):\n ...\n with tf.device(\"/job:worker/task:0\"):\n ...\n with tf.device(\"/job:worker/task:1\"):\n ...\n\n worker_sessions[0].run(...)\n ```\n\n Args:\n num_workers: Number of worker servers to start.\n num_ps: Number of PS servers to start.\n protocol: Communication protocol. Allowed values are documented in the\n documentation of `tf.distribute.Server`.\n worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be\n used to instantiate multiple devices etc.\n ps_config: (optional) `tf.ConfigProto` to initialize PS servers.\n\n Returns:\n A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list\n of `num_workers` objects of type `tf.distribute.Server` (all running\n locally);\n and `ps_servers` is a list of `num_ps` objects of similar type.\n\n Raises:\n ImportError: if portpicker module was not found at load time\n \"\"\"\n import portpicker # pylint: disable=g-import-not-at-top\n worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]\n ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]\n cluster_dict = {\n \"worker\": [\"localhost:%s\" % port for port in worker_ports],\n \"ps\": [\"localhost:%s\" % port for port in ps_ports]\n }\n cs = server_lib.ClusterSpec(cluster_dict)\n\n workers = [\n server_lib.Server(\n cs,\n job_name=\"worker\",\n protocol=protocol,\n task_index=ix,\n config=worker_config,\n start=True) for ix in range(num_workers)\n ]\n ps_servers = [\n server_lib.Server(\n cs,\n job_name=\"ps\",\n protocol=protocol,\n task_index=ix,\n config=ps_config,\n start=True) for ix in range(num_ps)\n ]\n\n return workers, ps_servers\n\n\ndef get_node_def_from_graph(node_name, graph_def):\n \"\"\"Returns the `NodeDef` instance for given node name in the graph def.\n\n This method explores only the NodeDefs in `graph_def.node`.\n\n Args:\n node_name: Name of the NodeDef to search for.\n graph_def: An instance of `GraphDef` proto.\n\n Returns:\n the `NodeDef` instance whose name field matches the given node_name or None.\n \"\"\"\n for node_def in graph_def.node:\n if node_def.name == node_name:\n return node_def\n return None\n\n\ndef set_producer_version(graph, producer_version):\n \"\"\"Sets graph.graph_def_versions.producer to `producer_version`.\"\"\"\n # The C API doesn't expose altering GraphDefVersions. We can indirectly set\n # it via import_graph_def though.\n graph_def = graph_pb2.GraphDef()\n graph_def.versions.producer = producer_version\n with graph.as_default():\n importer.import_graph_def(graph_def)\n assert graph.graph_def_versions.producer, producer_version\n\n\[email protected]\ndef _fake_gradient_tape_context_manager():\n \"\"\"tf.gradients(...) 
implemented as tf.GradientTape context manager interface.\n\n This is useful to test tf.gradients() in tests that uses tf.GradientTape().\n\n Yields:\n gradient tape instance that's implemented by tf.gradients() underneath.\n \"\"\"\n try:\n class FakeGradientTape:\n\n def watch(self, x):\n pass\n\n def gradient(self, y, x, grad_ys=None):\n result = gradients_impl.gradients(y, x, grad_ys)\n\n # Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single\n # element. So unpack if needed to match `tape.gradient()` behavior.\n if not isinstance(x, (list, tuple)):\n assert len(result) == 1\n return result[0]\n\n return result\n\n yield FakeGradientTape()\n finally:\n pass\n\n\nclass AbstractGradientTape:\n \"\"\"Abstract GradientTape context manager that has multiple implementations.\n\n This is useful to test both tf.GradientTape() and tf.gradients() without\n duplicating tests.\n \"\"\"\n\n def __init__(self, use_tape, persistent=False):\n self._use_tape = use_tape\n self._persistent = persistent\n\n def __enter__(self):\n if self._use_tape:\n self._tape_impl = backprop.GradientTape(persistent=self._persistent)\n else:\n self._tape_impl = _fake_gradient_tape_context_manager()\n return self._tape_impl.__enter__()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._tape_impl.__exit__(exc_type, exc_val, exc_tb)\n\n\[email protected]\ndef run_functions_eagerly(run_eagerly):\n \"\"\"Runs functions eagerly if `run_eagerly` is true.\n\n WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode\n *WILL NOT* make the tf.function to run eagerly because eager is disabled by\n default in V1. Instead, tf.function will run as a traced graph function.\n\n Ensures that the state (for running functions eagerly) is back to the initial\n `def_function.RUN_FUNCTIONS_EAGERLY` state.\n\n Args:\n run_eagerly: Boolean determining whether to run the function eagerly or not.\n\n Raises:\n ValueError if `run_eagerly` is not a boolean.\n\n Yields:\n Nothing.\n \"\"\"\n if not isinstance(run_eagerly, bool):\n raise ValueError(\n \"Expected bool for `run_eagerly` but got {}\".format(run_eagerly))\n\n is_eager = context.executing_eagerly()\n if not is_eager and run_eagerly:\n logging.warning(\n \"Running tf.function eagerly in V1 graph mode is not supported. \"\n \"tf.function will be run as a traced graph function.\")\n\n initial_state = def_function.functions_run_eagerly()\n def_function.run_functions_eagerly(run_eagerly)\n try:\n yield\n finally:\n def_function.run_functions_eagerly(initial_state)\n"
] |
[
[
"tensorflow.python.util._pywrap_util_port.IsBuiltWithNvcc",
"numpy.all",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.eager.context.context",
"tensorflow.python.client.pywrap_tf_session.TF_SetXlaAutoJitMode",
"numpy.where",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.protobuf.compare.assertProtoEqual",
"numpy.less_equal",
"tensorflow.python.training.server_lib.Server",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.gpu_util.compute_capability_from_device_desc",
"tensorflow.python.framework.ops.dismantle_graph",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.util._pywrap_util_port.IsBuiltWithROCm",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.platform.tf_logging.warning",
"numpy.array",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.util.protobuf.compare.ProtoEq",
"tensorflow.python.platform.googletest.GetTempDir",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.python.util.tf_inspect.ismodule",
"tensorflow.python.platform._pywrap_stacktrace_handler.InstallStacktraceHandler",
"tensorflow.python.ops.array_ops.transpose",
"numpy.allclose",
"numpy.less",
"tensorflow.python.eager.context.execution_mode",
"tensorflow.python.client.pywrap_tf_session.TF_SetXlaConstantFoldingDisabled",
"tensorflow.python.framework.ops.get_default_session",
"numpy.size",
"numpy.greater_equal",
"numpy.min",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.util.tf_inspect.isfunction",
"tensorflow.python.framework.is_mlir_bridge_test_true.is_mlir_bridge_enabled",
"numpy.ndim",
"tensorflow.python.util.tf_inspect.isclass",
"tensorflow.python.util._pywrap_util_port.IsGoogleCudaEnabled",
"tensorflow.python.framework.ops._default_graph_stack.reset",
"tensorflow.python.client.pywrap_tf_session.TF_SetXlaEnableLazyCompilation",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.util._pywrap_util_port.IsMklEnabled",
"tensorflow.python.platform.tf_logging.error",
"numpy.issubdtype",
"tensorflow.python.framework.config.tensor_float_32_execution_enabled",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.framework.config.enable_tensor_float_32_execution",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.greater",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.util.tf_inspect.getframeinfo",
"tensorflow.python.eager.def_function.functions_run_eagerly",
"tensorflow.python.util.compat.as_str",
"numpy.logical_not",
"tensorflow.python.client.pywrap_tf_session.TF_SetXlaMinClusterSize",
"numpy.isnan",
"tensorflow.python.framework.tfrt_utils.enabled",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.python.framework.device.canonical_name",
"numpy.transpose",
"tensorflow.python.util._pywrap_util_port.IsBuiltWithXLA",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.framework.ops.Graph",
"numpy.linalg.norm",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.util.tf_inspect.isframe",
"tensorflow.python.tf2.enabled",
"tensorflow.python.framework.ops.get_collection_proto_type",
"numpy.max",
"tensorflow.python.framework.ops.device",
"tensorflow.python.util._pywrap_util_port.GpuSupportsHalfMatMulAndConv",
"tensorflow.python.framework.ops.inside_function",
"tensorflow.python.framework.is_xla_test_true.is_xla_enabled",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.framework.importer.import_graph_def",
"tensorflow.python.eager.tape.distribution_strategy_context.get_strategy",
"tensorflow.python.ops.script_ops.py_func",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.framework.ops.has_default_graph",
"tensorflow.python.compat.compat.forward_compatibility_horizon",
"tensorflow.python.eager.def_function.run_functions_eagerly",
"numpy.abs",
"numpy.random.seed",
"tensorflow.python.client.pywrap_tf_session.TF_SetTfXlaCpuGlobalJit",
"tensorflow.python.client.pywrap_tf_session.TF_GetXlaConstantFoldingDisabled",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.core.framework.graph_pb2.GraphDef"
]
] |
SuziKim/DCCW
|
[
"b88b38c5d44bcb100f2d60c26a52c6268c446164"
] |
[
"dccw/views/applications.py"
] |
[
"from DCCWWebDemo.settings import MEDIA_ROOT\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.utils.crypto import get_random_string\n\nimport os\nimport ast\nimport random\nfrom PIL import Image\nimport numpy as np\nimport base64\nimport json\nimport glob\nfrom json import JSONEncoder\nfrom scipy import sparse\nimport enum\n\nfrom colormath.color_objects import LabColor, sRGBColor\nfrom colormath.color_conversions import convert_color\n\nfrom dccw.models import *\nfrom dccw.forms import *\nfrom dccw.color_palette import *\nfrom dccw.color_palettes import *\n\nfrom dccw.single_palette_sorter import * \nfrom dccw.multiple_palettes_sorter import * \nfrom dccw.similarity_measurer import *\nfrom dccw.geo_sorter_helper import *\n\nfrom dccw.views import view_helper\nfrom dccw.tan_decomposing import *\n\n################################\n# For Recoloring\nUSE_OPENCL = False\ntry:\n import pyopencl_example\n if len( pyopencl_example.cl.get_platforms() ) > 0:\n USE_OPENCL = True\nexcept: pass\nprint( \"Using OpenCL:\", USE_OPENCL )\n################################\n\n\ndef paletteInterpolation(request):\n\tcolor_count = random.randint(5, 10)\n\n\tif request.method == \"POST\":\n\t\tif request.POST.get(\"random\"):\n\t\t\tcolor_palette = ColorPalette(auto_fetched=True, palette_length=color_count)\n\n\t\telif request.POST.get(\"load\"):\n\t\t\tpalette_hex_str = request.POST.get('hex_input') \n\n\t\t\tvalidation_result = view_helper.validate_input(palette_hex_str, 1)\n\t\t\tif not validation_result['result']:\n\t\t\t\tprint(\"[Error]\", validation_result[\"message\"])\n\t\t\t\tmessages.info(request, validation_result[\"message\"])\n\t\t\t\treturn render(request, 'paletteInterpolator.html')\n\n\t\t\tpalette_hex = palette_hex_str.split('#')[1:]\n\t\t\tpalette_hex = ['#' + h for h in palette_hex]\n\t\t\tprint(palette_hex)\n\t\t\tcolor_palette = ColorPalette(auto_fetched=False, colors=palette_hex)\n\t\telse:\n\t\t\tprint(\"[ERROR] No such post name..\")\n\n\telse:\n\t\t# For Replicability Stamp, initialize with palettes used in teaser image\n\t\tcolor_palette = ColorPalette(auto_fetched=False, colors=[\"#fae3e1\",\"#1a4646\",\"#fbbf8d\",\"#db904c\",\"#cacaee\",\"#329c99\"])\n\n\tcolor_sorter = SinglePaletteSorter(color_palette)\n\tsorted_indices_dccw = {}\n\tsorted_indices_hsv = {}\n\tsorted_indices_luminance = {}\n\t\n\tsorted_indices_dccw['orig'] = color_sorter.standard_sort()\n\tsorted_indices_hsv['orig'], _ = color_sorter.sort(SinglePaletteSortMode.HSV)\n\tsorted_indices_luminance['orig'], _ = color_sorter.sort(SinglePaletteSortMode.Luminance)\n\n\tsorted_indices_hsv['anchored'] = sorted_indices_hsv['orig']\n\tsorted_indices_dccw['anchored']\t= sorted_indices_dccw['orig'][sorted_indices_dccw['orig'].index(sorted_indices_hsv['anchored'][0]):] \\\n\t\t\t\t\t\t\t\t\t+ sorted_indices_dccw['orig'][:sorted_indices_dccw['orig'].index(sorted_indices_hsv['anchored'][0])]\n\tsorted_indices_luminance['anchored'] = sorted_indices_luminance['orig'][sorted_indices_luminance['orig'].index(sorted_indices_hsv['anchored'][0]):] \\\n\t\t\t\t\t\t\t\t\t+ sorted_indices_luminance['orig'][:sorted_indices_luminance['orig'].index(sorted_indices_hsv['anchored'][0])]\n\n\t# =========================\n\t# For Figures=============\n\tprint(' '.join(color_palette.to_hex_list()))\n\n\t# for key, lex_sorted_index in lex_sorted_indices.items():\n\t\t# sorted_print_hex = 
[color_palette.to_hex_list()[i] for i in lex_sorted_index['orig']]\n\t\t# print(key, ' '.join(sorted_print_hex))\n\n\tsorted_print_hex = [color_palette.to_hex_list()[i] for i in sorted_indices_dccw['orig']]\n\tprint(' '.join(sorted_print_hex))\n\n\tsorted_print_hex = [color_palette.to_hex_list()[i] for i in sorted_indices_hsv['orig']]\n\tprint(' '.join(sorted_print_hex))\n\n\tsorted_print_hex = [color_palette.to_hex_list()[i] for i in sorted_indices_luminance['orig']]\n\tprint(' '.join(sorted_print_hex))\n\t# For Figures=============\n\t# =========================\n\n\treturn render(request, 'paletteInterpolator.html', {\n\t'hex_data': color_palette.to_hex_list(),\n\t'sorted_indices_dccw': sorted_indices_dccw, \n\t'sorted_indices_luminance' : sorted_indices_luminance, \n\t'sorted_indices_hsv': sorted_indices_hsv, \n\t})\n\n\ndef paletteNavigation(request):\n\ttop_query_count = 3\n\n\tsource_palette = _get_random_palette_for_palette_navigator()\n\tsource_palette_hex_list = _make_a_list_from_serialized_hex_string(source_palette.hexes_list)\n\n\tdataset_names = ['palettes', 'Unsplash', 'mcs', 'wikiart-5', 'wikiart-10']\n\tquery_results = {}\n\n\tfor dataset_name in dataset_names:\n\t\tpalette_distance_candidates = PaletteNavigatorDistanceModel.objects.filter((Q(palette1=source_palette) & Q(palette2__data_source__startswith=dataset_name)) | (Q(palette2=source_palette) & Q(palette1__data_source__startswith=dataset_name))).order_by('distance')\n\n\t\tquery_result_array = []\n\t\tfor result_palette_distance in palette_distance_candidates[:top_query_count]:\n\t\t\ttarget_palette = result_palette_distance.palette1\n\t\t\tif result_palette_distance.palette1 == source_palette:\n\t\t\t\ttarget_palette = result_palette_distance.palette2\n\n\t\t\teach_result_dic = {}\n\t\t\teach_result_dic['id'] = target_palette.palette_id\n\t\t\teach_result_dic['sorted_hex'] = _make_a_list_from_serialized_hex_string(target_palette.hexes_list)\n\t\t\teach_result_dic['distance'] = result_palette_distance.distance\n\t\t\t\n\t\t\tquery_result_array.append(each_result_dic)\n\t\t\t\n\t\tquery_results[dataset_name] = query_result_array\n\n\treturn render(request, 'paletteNavigator.html', {\n\t\t'source_data': source_palette_hex_list,\n\t\t'query_results': query_results,\n\t})\n\n# https://pynative.com/python-serialize-numpy-ndarray-into-json/\nclass NumpyArrayEncoder(JSONEncoder):\n\tdef default(self, obj):\n\t\tif isinstance(obj, numpy.ndarray):\n\t\t\treturn obj.tolist()\n\t\treturn JSONEncoder.default(self, obj)\n\nclass RecoloringInterimResultMode(enum.Enum):\n\tsource_palette = {'id': 1, 'is_matrix': False}\n\timage_data = {'id': 2, 'is_matrix': False}\n\timage_size = {'id': 3, 'is_matrix': False}\n\tRGBXY_mixing_weights = {'id': 4, 'is_matrix': True}\n\nclass RecoloringResult(enum.Enum):\n\ttan_et_al = {'id': 1, 'tag': 'tan-et-al-2018'}\n\tours = {'id': 2, 'tag': 't2s-buffer'}\n\ndef _get_recoloring_file_names(mode):\n\timage_name = 'recoloring-%s.png' % mode.value['tag']\n\tpalette_image_name = 'input_palette-%s.png' % mode.value['tag']\n\treturn image_name, palette_image_name\n\ndef _get_interim_file_path(image_uid, mode):\n\textension = 'npz' if mode.value['is_matrix'] else 'txt'\n\treturn os.path.join(settings.MEDIA_ROOT, image_uid, '%s.%s' % (mode.name, extension))\n\ndef _serialize_and_save_numpy_array(image_uid, mode, np_array):\n\toutput_path = _get_interim_file_path(image_uid, mode)\n\tprint('serialize:', output_path)\n\twith open(output_path, 'w') as outfile:\n\t\tjson.dump({\"array\": np_array}, outfile, 
cls=NumpyArrayEncoder)\n\ndef _serialize_and_save_sparse_matrix(image_uid, mode, matrix):\n\toutput_path = _get_interim_file_path(image_uid, mode)\n\tprint('serialize:', output_path)\n\tsparse.save_npz(output_path, matrix, compressed=True)\n\ndef _deserialize_and_load_numpy_array(image_uid, mode):\n\t# print('deserialize: ', output_path)\n\toutput_path = _get_interim_file_path(image_uid, mode)\n\tresult_np_array = None\n\twith open(output_path) as serialized_array:\n\t\tresult_np_array = np.asarray(json.loads(serialized_array.read())['array'])\n\n\treturn result_np_array\n\ndef _deserialize_and_load_sparse_matrix(image_uid, mode):\n\t# print('deserialize: ', output_path)\n\toutput_path = _get_interim_file_path(image_uid, mode)\n\treturn sparse.load_npz(output_path)\n\ndef _data_exist(image_uid):\n\treturn os.path.isfile(_get_interim_file_path(image_uid, RecoloringInterimResultMode.source_palette)) and \\\n\t\tos.path.isfile(_get_interim_file_path(image_uid, RecoloringInterimResultMode.image_data)) and \\\n\t\tos.path.isfile(_get_interim_file_path(image_uid, RecoloringInterimResultMode.image_size)) and \\\n\t\tos.path.isfile(_get_interim_file_path(image_uid, RecoloringInterimResultMode.RGBXY_mixing_weights))\n\n\ndef imageRecoloring(request):\n\tfile_form = ImageRecoloringForm(request.POST, request.FILES)\n\n\tif request.method == 'POST':\n\t\tif request.FILES.get('source_image'):\n\t\t\tif file_form.is_valid():\n\t\t\t\timage_uid = get_random_string(length=32)\n\t\t\t\toutput_dir_path = os.path.join(settings.MEDIA_ROOT, image_uid)\n\t\t\t\tos.makedirs(output_dir_path, exist_ok=True)\n\n\t\t\t\tinput_image = Image.open(file_form.cleaned_data['source_image'])\n\t\t\t\tinput_image.save(os.path.join(output_dir_path, 'source_image.png'), optimize=False, quality=100)\n\t\t\t\tnp_input_image = np.asfarray(input_image.convert('RGBA'))\n\t\t\t\t\n\t\t\t\trequest.session['image_uid'] = image_uid\n\t\t\t\t\n\t\t\t\tif _data_exist(output_dir_path):\n\t\t\t\t\tsource_palette = _deserialize_and_load_numpy_array(image_uid, RecoloringInterimResultMode.source_palette)\n\t\t\t\telse:\n\t\t\t\t\tdata_hull, RGBXY_mixing_weights, source_palette = _calculate_convex_hull_data_of_image(np_input_image, output_dir_path)\n\t\t\t\t\t_serialize_and_save_numpy_array(image_uid, RecoloringInterimResultMode.source_palette, source_palette)\n\t\t\t\t\t_serialize_and_save_numpy_array(image_uid, RecoloringInterimResultMode.image_size, np_input_image.shape)\n\t\t\t\t\t_serialize_and_save_numpy_array(image_uid, RecoloringInterimResultMode.image_data, (np_input_image[:, :, :3].reshape((-1, 3))[data_hull.vertices]).reshape((-1, 1, 3)) / 255.0)\n\t\t\t\t\t_serialize_and_save_sparse_matrix(image_uid, RecoloringInterimResultMode.RGBXY_mixing_weights, RGBXY_mixing_weights)\n\t\t\t\t\n\t\t\t\trequest.session['image_loaded'] = True\n\t\t\t\trequest.session['image_file_name'] = file_form.files['source_image'].name\n\t\t\t\treturn render(request, 'imageRecoloring.html', {\n\t\t\t\t\t'file_form': ImageRecoloringForm(),\n\t\t\t\t\t'current_file_name': request.session['image_file_name'],\n\t\t\t\t\t'source_image_path': os.path.join(settings.MEDIA_URL, image_uid, 'source_image.png'),\n\t\t\t\t\t'source_palette': _rgb_to_hex_list(source_palette),\n\t\t\t\t})\n\n\t\telif request.POST.get(\"recolor\") or request.POST.get(\"random\"):\n\t\t\tif not request.session['image_loaded']:\n\t\t\t\treturn render(request, 'imageRecoloring.html', {\n\t\t\t\t\t'file_form': ImageRecoloringForm()\n\t\t\t\t})\n\n\t\t\timage_uid = 
request.session['image_uid']\n\t\t\tsource_palette = _deserialize_and_load_numpy_array(image_uid, RecoloringInterimResultMode.source_palette)\n\n\t\t\tif request.POST.get(\"recolor\"):\n\t\t\t\ttarget_palette_hex_str = request.POST.get('hex_input')\n\t\t\t\tvalidation_result = view_helper.validate_input(target_palette_hex_str, 1, 4)\n\t\t\t\t\n\t\t\t\tif not validation_result['result']:\n\t\t\t\t\tprint(\"[Error]\", validation_result[\"message\"])\n\t\t\t\t\tmessages.info(request, validation_result[\"message\"])\n\t\t\t\t\treturn render(request, 'imageRecoloring.html', {\n\t\t\t\t\t\t'file_form': ImageRecoloringForm(),\n\t\t\t\t\t\t'current_file_name': request.session['image_file_name'],\n\t\t\t\t\t\t'source_image_path': os.path.join(settings.MEDIA_URL, image_uid, 'source_image.png'),\n\t\t\t\t\t\t'source_palette': _rgb_to_hex_list(source_palette),\n\t\t\t\t\t})\n\t\t\t\t\n\t\t\t\ttarget_palette_hex_list = _make_a_list_from_serialized_hex_string(target_palette_hex_str)\n\t\t\t\ttarget_color_palette = ColorPalette(auto_fetched=False, colors=target_palette_hex_list)\n\t\t\telse:\n\t\t\t\ttarget_color_palette = ColorPalette(auto_fetched=True, palette_length=random.randint(5, 10))\n\n\t\t\t\n\t\t\toutput_dir_path = os.path.join(settings.MEDIA_ROOT, image_uid)\t\t\t\n\n\t\t\tsorted_source_palette, sorted_target_palette, t2s_rgb_palette, tanstal_recolored_image_path, t2sbuffer_recolored_image_path = _recoloring(image_uid, source_palette, target_color_palette)\n\n\t\t\twith open(os.path.join(output_dir_path, 'hex_data.txt'), 'w') as f:\n\t\t\t\tf.write('extracted_palette\\t' + ' '.join(_rgb_to_hex_list(sorted_source_palette)) + '\\n')\n\t\t\t\tf.write('sorted_target_palette\\t' + ' '.join(_rgb_to_hex_list(sorted_target_palette)) + '\\n')\n\t\t\t\tf.write('t2s_palette\\t' + ' '.join(_rgb_to_hex_list(t2s_rgb_palette)) + '\\n')\n\n\t\t\tprint('\\textracted_palette', _rgb_to_hex_list(sorted_source_palette))\n\t\t\tprint('\\tsorted_target_palette', _rgb_to_hex_list(sorted_target_palette))\n\t\t\tprint('\\tt2s_palette', _rgb_to_hex_list(t2s_rgb_palette))\n\n\t\t\treturn render(request, 'imageRecoloring.html', {\n\t\t\t\t'file_form': ImageRecoloringForm(),\n\t\t\t\t'current_file_name': request.session['image_file_name'],\n\t\t\t\t'source_image_path': os.path.join(settings.MEDIA_URL, image_uid, 'source_image.png'),\n\t\t\t\t'source_palette': _rgb_to_hex_list(source_palette),\n\t\t\t\t'target_palette': target_color_palette.to_hex_list(),\n\t\t\t\t't2s_palette':_rgb_to_hex_list(t2s_rgb_palette),\n\t\t\t\t'tanetal_recoloring_path': os.path.join(settings.MEDIA_URL, image_uid, _get_recoloring_file_names(RecoloringResult.tan_et_al)[0]),\n\t\t\t\t't2sbuffer_recoloring_path': os.path.join(settings.MEDIA_URL, image_uid, _get_recoloring_file_names(RecoloringResult.ours)[0])\n\t\t\t})\n\t\telse:\n\t\t\tprint(\"[ERROR] No such post name in imageRecoloring\")\n\telse:\n\t\trequest.session['image_loaded'] = False\n\t\treturn render(request, 'imageRecoloring.html', {'file_form': ImageRecoloringForm()})\t\n\t\n\ndef _make_a_list_from_serialized_hex_string(hex_string):\n\thex_list = hex_string.split('#')[1:]\n\treturn ['#' + h for h in hex_list]\n\n\ndef _get_random_palette_for_palette_navigator():\n\t# reference: https://django-orm-cookbook-ko.readthedocs.io/en/latest/random.html\n\treturn PaletteNavigatorPaletteModel.objects.filter(data_source='palettes_colourLoversTop100').order_by(\"?\").first()\n\n\ndef _calculate_convex_hull_data_of_image(np_input_image, output_dir_path, rereconstructed=False):\n\t# Compute 
RGBXY_mixing_weights.\n\tprint(\"\\tComputing RGBXY mixing weights ...\")\n\tX, Y = np.mgrid[0:np_input_image.shape[0], 0:np_input_image.shape[1]]\n\tXY = np.dstack((X * 1.0 / np_input_image.shape[0], Y * 1.0 / np_input_image.shape[1]))\n\tRGBXY_data = np.dstack((np_input_image[:, :, :3] / 255.0, XY))\n\tprint(\"\\t\\tConvexHull 5D...\")\n\tdata_hull = ConvexHull(RGBXY_data.reshape((-1, 5)))\n\t\n\tprint(\"\\t\\tComputing W_RGBXY...\")\n\tRGBXY_mixing_weights = Additive_mixing_layers_extraction.recover_ASAP_weights_using_scipy_delaunay(data_hull.points[data_hull.vertices], data_hull.points, option=3)\n\tprint(\"\\t\\tFinish!\")\n\trgb_palette = None\n\n\tif not rereconstructed:\n\t\t# Using determined palette number\n\t\tdata = np_input_image[:, :, :3].reshape((-1, 3))/255.0\n\t\trgb_palette = Additive_mixing_layers_extraction.Hull_Simplification_determined_version(data, output_dir_path)\n\t\trgb_palette = (np.asarray(rgb_palette) * 255).tolist()\n\n\treturn data_hull, RGBXY_mixing_weights, rgb_palette\n\t\n\ndef _recoloring(image_uid, source_palette, target_color_palette):\n\t# sort w/ color palette and input image's palette\n\tprint('\\t\\t\\tDCCW calculation')\n\tcolor_palettes_hex_list = [_rgb_to_hex_list(source_palette), target_color_palette.to_hex_list()]\n\tsource_to_target_closest_points, target_to_source_closest_points, sorted_indices = _get_dccw_closest_points(color_palettes_hex_list)\n\t\n\tsorted_source_palette = [source_palette[i] for i in sorted_indices[0]]\n\n\tprint('\\t\\t\\tSave Tans')\n\t# Result of Tan et al.'s recoloring\n\tsorted_target_palette = target_color_palette.get_values_in_order('RGB', sorted_indices[1])\n\ttanstal_recolored_image_path = _save_result_of_recoloring(sorted_target_palette, image_uid, RecoloringResult.tan_et_al)\n\t\n\tprint('\\t\\t\\tSave t2sbuffer')\n\t# Result of Target-to-source recoloring\n\tt2s_rgb_palette = _lab_to_rgb_list(target_to_source_closest_points)\n\tt2sbuffer_recolored_image_path = _save_result_of_recoloring(sorted_target_palette+t2s_rgb_palette, image_uid, RecoloringResult.ours)\n\n\t# save original palettes\n\tprint('\\t\\t\\tSave original')\n\t_save_image_from_hex_list(_rgb_to_hex_list(source_palette), os.path.join(settings.MEDIA_ROOT, image_uid, 'source_palette.png'))\n\n\treturn sorted_source_palette, sorted_target_palette, t2s_rgb_palette, tanstal_recolored_image_path, t2sbuffer_recolored_image_path\n\n\ndef _save_result_of_recoloring(target_palette, image_uid, mode):\n\tlayers = _reflect_weights_to_image_pixels(target_palette, image_uid)\n\trecolored_image = _reconstruct_image_from_colors_and_weights(target_palette, layers)\n\n\timage_name, palette_image_name = _get_recoloring_file_names(mode)\n\trecolored_image_path = os.path.join(settings.MEDIA_ROOT, image_uid, image_name)\n\trecolored_image.save(recolored_image_path, optimize=False, quality=100)\n\t_save_image_from_hex_list(_rgb_to_hex_list(target_palette), os.path.join(settings.MEDIA_ROOT, image_uid, palette_image_name))\n\n\treturn recolored_image_path\n\n\ndef _reflect_weights_to_image_pixels(target_palette, image_uid):\n\timage_size = _deserialize_and_load_numpy_array(image_uid, RecoloringInterimResultMode.image_size)\n\timage_data = _deserialize_and_load_numpy_array(image_uid, RecoloringInterimResultMode.image_data)\n\tRGBXY_mixing_weights = _deserialize_and_load_sparse_matrix(image_uid, RecoloringInterimResultMode.RGBXY_mixing_weights)\n\toutput_dir_path = os.path.join(settings.MEDIA_ROOT, image_uid)\n\n\ttarget_palette = np.array(target_palette) / 
255\n\tnum_layers = target_palette.shape[0]\n\tw_rgb = Additive_mixing_layers_extraction.Get_ASAP_weights_using_Tan_2016_triangulation_and_then_barycentric_coordinates(image_data, target_palette, output_dir_path, order=0)\n\tw_rgb = w_rgb.reshape((-1, num_layers))\n\n\tif USE_OPENCL:\n\t\tw_rgbxy_values = RGBXY_mixing_weights.data\n\t\tw_rgbxy_values = w_rgbxy_values.reshape((-1,6))\n\t\tw_rgbxy_indices = RGBXY_mixing_weights.indices.reshape((-1,6))\n\n\t\tmult, _ = pyopencl_example.prepare_openCL_multiplication(w_rgb, w_rgbxy_values, w_rgbxy_indices)\n\t\tfinal_mixing_weights = mult(w_rgb)\n\n\telse:\n\t\tfinal_mixing_weights = RGBXY_mixing_weights.dot(w_rgb)\n\n\tlayers = final_mixing_weights.reshape((image_size[0], image_size[1], num_layers))\n\t\n\treturn layers\n\n\ndef _lab_to_rgb_list(lab_list):\n\tpalette = []\n\tfor lab in lab_list:\n\t\tL, a, b = lab.tolist()\n\t\tlab = LabColor(L, a, b)\n\t\trgb = convert_color(lab, sRGBColor, target_illuminant='d65')\n\t\trgb = [_rgb_clamp(x) for x in rgb.get_upscaled_value_tuple()]\n\t\tpalette.append(rgb)\n\n\treturn palette\n\n\ndef _rgb_to_hex_list(rgb_list):\n\thex_list = []\n\tfor r, g, b in rgb_list:\n\t\thex_str = \"#{0:02x}{1:02x}{2:02x}\".format(_rgb_clamp(r), _rgb_clamp(g), _rgb_clamp(b))\n\t\thex_list.append(hex_str)\n\treturn hex_list\n\n\ndef _rgb_clamp(x): \n\treturn int(max(0, min(round(x), 255)))\n\n\ndef _reconstruct_image_from_colors_and_weights(colors, weights):\n\tcolors = np.array(colors) / 255\n\theight, width, weight_len = weights.shape\n\treconstructed_image = Image.new(\"RGB\", (width, height), color=0)\n\treconstructed_image_pixels = reconstructed_image.load()\n\n\tresult = np.zeros((height, width, 3))\n\n\tfor weight_index in range(weight_len):\n\t\tcur_layer = weights[:, :, weight_index]\n\t\tfor i in range(3):\n\t\t\tresult[:, :, i] += 255 * cur_layer * colors[weight_index][i]\n\n\treconstructed_image = Image.fromarray(result.astype('uint8'), 'RGB')\n\treturn reconstructed_image\n\n\ndef _save_image_from_hex_list(hex_list, file_name):\n\tcolor_palette = ColorPalette(auto_fetched=False, colors=hex_list)\n\tcolor_block_width = 100\n\n\tbulked_up = color_palette.bulk_up(color_block_width)\n\tImage.fromarray(bulked_up).save(file_name)\n\n\ndef _get_dccw_closest_points(color_palettes_hex_list):\n\tpalettes = ColorPalettes(\n\t auto_fetched=False, color_palettes_list=color_palettes_hex_list, is_hex_list=True)\n\tsource_palette = palettes.get_single_palettes_list()[0]\n\ttarget_palette = palettes.get_single_palettes_list()[1]\n\n\tsimilarity_measurer = SimilarityMeasurer(source_palette, target_palette, LabDistanceMode.CIEDE2000)\n\n\tsource_to_target_closest_points, target_to_source_closest_points = similarity_measurer.closest_points_by_dccw()\n\treturn source_to_target_closest_points, target_to_source_closest_points, similarity_measurer.get_palette_sorted_indices()\n\t"
] |
[
[
"numpy.asarray",
"scipy.sparse.load_npz",
"numpy.dstack",
"scipy.sparse.save_npz",
"numpy.array",
"numpy.zeros"
]
] |
UAVKillerLAB/UK_AC
|
[
"dbeae65f0916df351b39f8b95d01a139894ae78e"
] |
[
"uk_without_timestamp/onlyDOS/SelectAngle.py"
] |
[
"# encoding=utf-8\nimport json\nimport os\nimport socket # 引入套接字\nimport threading # 引入并行\nimport time\n\nimport pymysql\nimport struct\nimport serial\nimport matplotlib.pyplot as plt\nimport datetime\nimport math\nimport sys\nfrom geographiclib.geodesic import Geodesic\nimport configparser\nimport csv\n\n# import comm_udp_serial\n\n# sys.path.append(r'D:\\OneDrive\\Python_project\\Github\\AntiUAV_Python\\test')\n\n# from DBInfo import *\n\nplt.ion() # 开启一个画图的窗口\nax1 = [] # 定义一个 x 轴的空列表用来接收动态的数据\nax2 = []\nach1 = [] # 定义一个 y 轴的空列表用来接收动态的数据\nach2 = []\nach3 = []\nach4 = []\n\nach = []\n\ncount_time = 0\nLast_rawdata = 0\nProcess_data = 0\nch1ch2diff_db_data = []\nch2ch3diff_db_data = []\nch3ch4diff_db_data = []\nch1_db_data = []\nch2_db_data = []\nch3_db_data = []\nch4_db_data = []\n\nangle_db_data = []\nangle_buffer = [math.pi/4, math.pi/4, 0, 0]\n\n\ndef Print4(counnt, ch1_data, ch2_data, ch3_data, ch4_data, angle):\n data = datetime.date.today()\n plt.figure(\"4 Channel&Matched Angle\" + str(data))\n ax2.append(counnt) # 添加 i 到 x 轴的数据中\n ach1.append(ch1_data)\n ach2.append(ch2_data)\n ach3.append(ch3_data)\n ach4.append(ch4_data)\n\n ach.append(angle)\n\n plt.clf() # 清除之前画的图\n plt.subplot(2, 1, 1)\n plt.plot(ax2, ach1, label=\"CH1\") # 画出当前 ax 列表和 ay 列表中的值的图形\n plt.plot(ax2, ach2, label=\"CH2\")\n plt.plot(ax2, ach3, label=\"CH3\")\n plt.plot(ax2, ach4, label=\"CH4\")\n plt.title('Channel Data')\n plt.xlabel(\"Time\")\n plt.ylabel(\"Amplitude\")\n plt.grid(True)\n\n plt.subplot(2, 1, 2)\n plt.plot(ax2, ach, label=\"Matched Angle\")\n plt.title('Angle')\n plt.xlabel(\"Time\")\n plt.ylabel(\"Angle\")\n plt.grid(True)\n plt.pause(0.08) # 暂停一秒\n # plt.ioff() # 关闭画图的窗口\n\n\ndef DataProcess(count, angle_now, angle_last):\n if count == 0:\n return angle_now\n else:\n if abs(angle_now - angle_last) >= 20:\n # print(\"平均\")\n # result = (angle1 + angle2) / 2\n if angle_now - angle_last > 0:\n return angle_last + 10\n if angle_now - angle_last < 0:\n return angle_last - 10\n else:\n return angle_now\n\n\ndef Print1(counnt, ch_data):\n ax1.append(counnt) # 添加 i 到 x 轴的数据中\n ach.append(ch_data)\n\n plt.clf() # 清除之前画的图\n plt.subplot(2, 1, 1)\n plt.plot(ax1, ach1) # 画出当前 ax 列表和 ay 列表中的值的图形\n plt.pause(0.1)\n\n\ndef fetch_data():\n global ch1_db_data\n global ch2_db_data\n global ch3_db_data\n global ch4_db_data\n\n global angle_db_data\n try:\n conn = pymysql.connect(host=db_host, port=3306, db=db_name, user='root', passwd=\"123456\",\n charset='utf8')\n cs1 = conn.cursor()\n cs1.execute(\"select ch1 from final_table\")\n result1 = cs1.fetchall()\n cs1.execute(\"select ch2 from final_table\")\n result2 = cs1.fetchall()\n cs1.execute(\"select ch3 from final_table\")\n result3 = cs1.fetchall()\n cs1.execute(\"select ch4 from final_table\")\n result4 = cs1.fetchall()\n\n cs1.execute(\"select angle from final_table\")\n result5 = cs1.fetchall()\n # print(result1)\n # print(result2)\n # print(result3)\n # print(result4)\n for i in range(len(result5)):\n ch1_db_data.append(float((result1[i])[0]))\n ch2_db_data.append(float((result2[i])[0]))\n ch3_db_data.append(float((result3[i])[0]))\n ch4_db_data.append(float((result4[i])[0]))\n angle_db_data.append(float((result5[i])[0]))\n # print(result1[0])\n conn.commit()\n cs1.close()\n conn.close()\n except Exception as e:\n print(e)\n pass\n\n\ndef SelectAngle(ch1_raw_data, ch2_raw_data, ch3_raw_data, ch4_raw_data):\n \"\"\"\n :param ch1_raw_data: 通道一数据\n :param ch2_raw_data: 通道二数据\n :param ch3_raw_data: 通道三数据\n :param ch4_raw_data: 通道四数据\n :return: 
result:查表得到的角度值\n \"\"\"\n global ch1ch2diff_db_data, ch2ch3diff_db_data, ch3ch4diff_db_data, ch4_db_data, count_time, Last_rawdata, Process_data\n # 归一化\n min_ch_data = min(ch1_raw_data, ch2_raw_data, ch3_raw_data, ch4_raw_data)\n min_ch_data = float(min_ch_data)\n ch1_data = float(ch1_raw_data) / min_ch_data\n ch2_data = float(ch2_raw_data) / min_ch_data\n ch3_data = float(ch3_raw_data) / min_ch_data\n ch4_data = float(ch4_raw_data) / min_ch_data\n if ch1_data + ch2_data + ch3_data + ch4_data <= 4:\n print(\"数据可能有误!\\n数据可能有误!\\n数据可能有误!\\n数据可能有误!\\n数据可能有误!\")\n\n ch1_data = float(ch1_data)\n ch2_data = float(ch2_data)\n ch3_data = float(ch3_data)\n ch4_data = float(ch4_data)\n sum_difference = []\n\n print(\"len(angle_db_data):\",len(angle_db_data))\n for i in range(len(angle_db_data)):\n ch1ch2_difference = (20 * math.log((ch1_db_data[i] / ch2_db_data[i]), 10) - 20 * math.log((ch1_data / ch2_data),\n 10)) ** 2 # 差值放大\n ch2ch3_difference = (20 * math.log((ch2_db_data[i] / ch3_db_data[i]), 10) - 20 * math.log((ch2_data / ch3_data),\n 10)) ** 2 # 差值放大\n ch3ch4_difference = (20 * math.log((ch3_db_data[i] / ch4_db_data[i]), 10) - 20 * math.log((ch3_data / ch4_data),\n 10)) ** 2 # 差值放大\n sum_difference.append(ch1ch2_difference + ch2ch3_difference + ch3ch4_difference)\n # print(sum_difference)\n # print(min(sum_difference))\n # print(sum_difference.index(min(sum_difference)))\n # return sum_difference.index(min(sum_difference))\n\n global count_time\n global Last_rawdata\n global Process_data\n Last_rawdata = Process_data\n Process_data = sum_difference.index(min(sum_difference))\n Process_data=angle_db_data[Process_data]\n result = DataProcess(count_time, Process_data, Last_rawdata)\n\n count_time += 1\n\n return result\n\n\ndef init():\n global ser, usb_data_len, udp_socket, usb_com, db_host, db_name, db_pwd, db_user, db_port, pc_host, pc_port, s10_correction_angle, s8_correction_angle, station10_latitude, station10_longitude, station8_latitude, station8_longitude\n print(\"读取config.json......\")\n # Reading data from file\n with open(\"config.json\", 'r') as f:\n config_json_data = json.load(f)\n db_host = config_json_data[\"db_host\"]\n print(\"数据库IP地址:\", db_host)\n db_name = config_json_data[\"db_name\"]\n print(\"数据库名:\", db_name)\n db_pwd = str(config_json_data[\"db_password\"])\n db_user = config_json_data[\"db_user\"]\n db_port = config_json_data[\"db_port\"]\n usb_com = config_json_data[\"com\"]\n print(\"COM:\", usb_com)\n station10_latitude = config_json_data[\"station10_latitude\"]\n station10_longitude = config_json_data[\"station10_longitude\"]\n print(\"主站经纬度:({},{})\".format(station10_latitude,station10_longitude))\n station8_latitude = config_json_data[\"station8_latitude\"]\n station8_longitude = config_json_data[\"station8_longitude\"]\n print(\"一站经纬度:({},{})\".format(station8_latitude,station8_longitude))\n s10_correction_angle = config_json_data[\"s10_correction_angle\"]\n print(\"主站0°方位角:\" + str(s10_correction_angle))\n s8_correction_angle = config_json_data[\"s8_correction_angle\"]\n print(\"一站0°方位角:\" + str(s8_correction_angle))\n print(\"\\n读取成功!\\n\")\n create_csv()\n ser = serial.Serial(usb_com, 115200)\n ser.close()\n ser.open()\n usb_data_len = 66\n pc_port = 10001\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # 创建套接字\n udp_socket.bind((\"\", pc_port)) # 服务器绑定ip和端口\n # 问题描述:套接字必须发送一次才能接收\n udp_socket.sendto(\"1\".encode(\"utf-8\"), ('192.168.3.100', 8080))\n\n\ndef create_csv():\n global csv_name\n if os.path.exists(\"location_info\"):\n pass\n 
else:\n os.mkdir(\"location_info\")\n os.chdir(\"location_info\")\n temp = time.strftime(\"%Y%m%d%H%M\")\n csv_name = temp + \".csv\"\n with open(csv_name, 'w', newline='') as f:\n csv_write = csv.writer(f)\n csv_head = [\"time\", \"latitude\", \"longitude\"]\n csv_write.writerow(csv_head)\n print(\"创建成功!\")\n\n\ndef write_csv(time, latitude, longitude):\n with open(csv_name, 'a+', newline='') as f:\n csv_write = csv.writer(f)\n data_row = [time, latitude, longitude]\n csv_write.writerow(data_row)\n\n\ndef positioning(station10_angle, station8_angle, station10_correction, station8_correction, station10_latitude,\n station10_longitude):\n \"\"\"\n :param station10_angle: 主站方位角\n :param station8_angle: 主站修正角\n :param station10_correction: 一站方位角\n :param station8_correction: 一站修正角\n :param station10_latitude: 主站纬度\n :param station10_longitude: 主站经度\n :return:none\n \"\"\"\n \"\"\"\n 修正角为地理学上的方位角。\n 定义:从标准方向的北端起,顺时针方向到直线的水平角称为该直线的方位角。方位角的取值范围为0°~360°。\n\n 一定要区别于方向角。\n 定义:方向角指的是采用某坐标轴方向作为标准方向所确定的方位角。有时,方向角是从正北或正南方向到目标方向所形成的小于九十度的角。\n \"\"\"\n global station8_latitude, station8_longitude\n station10_latitude = float(station10_latitude)\n station10_longitude = float(station10_longitude)\n station10_correction = int(station10_correction)\n # direction_Zh = [\"东偏南\", \"南偏西\", \"西偏北\", \"北偏东\"]\n direction_Zh = [\"北偏东\", \"东偏南\", \"南偏西\", \"西偏北\"]\n \"\"\"\n DISTANCE_S10_S8, ANGLE_A, ANGLE_B需要确定\n \"\"\"\n # DISTANCE_S10_S8 = 398\n # ANGLE_A = 69\n # ANGLE_B = 21\n # DISTANCE_S10_S8 = Geodesic.WGS84.Inverse(station10_latitude, station10_longitude, station8_latitude, station8_longitude)[\"s12\"]\n DISTANCE_S10_S8 = 35.2\n ANGLE_A = 25\n ANGLE_B = 65\n # 一顿计算\n \"\"\"\n 两站与目标构成的三角形的内角\n angle_alpha:alpha为主站角\n angle_beta:beta为一站角\n \"\"\"\n # angle_alpha = math.radians(station10_correction - 180 + station10_angle - ANGLE_A)\n # angle_beta = math.radians(station8_correction - 90 + station8_angle - ANGLE_B)\n # print(180 - station10_angle - ANGLE_A, station8_angle - ANGLE_B)\n angle_alpha = math.radians(180 - station10_angle)\n angle_beta = math.radians(station8_angle)\n\n # 剔除异常值\n angle_buffer[2] = angle_alpha\n angle_buffer[3] = angle_beta\n if angle_alpha + angle_beta >= math.pi:\n # 输出上一个值\n angle_alpha = angle_buffer[0]\n angle_beta = angle_buffer[1]\n angle_buffer[0] = angle_alpha\n angle_buffer[1] = angle_beta\n\n distance_s10_target = (math.sin(angle_beta) * DISTANCE_S10_S8) / (math.sin(math.pi - angle_alpha - angle_beta))\n distance_s8_target = (math.sin(angle_alpha) * DISTANCE_S10_S8) / (math.sin(math.pi - angle_alpha - angle_beta))\n # return distance_s10_target, distance_s8_target\n # 获取每一经度间的距离(单位:米)\n longitude_distance = (Geodesic.WGS84.Inverse(station10_latitude, 104.00001, station10_latitude, 104.00002)[\n \"s12\"]) * 100000\n # print(\"longitude_distance\", longitude_distance)\n LATITUDE_DISTANCE = 110946.304\n\n # 统一极坐标系\n azimuth = (station10_angle + station10_correction) % 360\n print(azimuth)\n # 通过方位角判断方向\n if 0 < azimuth <= 90:\n direction_flag = 0\n target_direction = azimuth\n elif 90 < azimuth <= 180:\n direction_flag = 1\n target_direction = azimuth - 90\n elif 180 < azimuth <= 270:\n direction_flag = 2\n target_direction = azimuth - 180\n elif 270 < azimuth <= 360:\n direction_flag = 3\n target_direction = azimuth - 270\n\n # 北偏东\n if direction_flag == 0:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s10_target * math.cos(math.radians(target_direction))\n longitude_change_distance = distance_s10_target * math.sin(math.radians(target_direction))\n # 计算目标纬度和经度\n 
s10_target_latitude = station10_latitude + latitude_change_distance / longitude_distance\n s10_target_longitude = station10_longitude + longitude_change_distance / LATITUDE_DISTANCE\n # 东偏南\n elif direction_flag == 1:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s10_target * math.sin(math.radians(target_direction))\n longitude_change_distance = distance_s10_target * math.cos(math.radians(target_direction))\n # 计算目标纬度和经度\n s10_target_latitude = station10_latitude - latitude_change_distance / longitude_distance\n s10_target_longitude = station10_longitude + longitude_change_distance / LATITUDE_DISTANCE\n # 南偏西\n elif direction_flag == 2:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s10_target * math.cos(math.radians(target_direction))\n longitude_change_distance = distance_s10_target * math.sin(math.radians(target_direction))\n # 计算目标纬度和经度\n s10_target_latitude = station10_latitude - latitude_change_distance / longitude_distance\n s10_target_longitude = station10_longitude - longitude_change_distance / LATITUDE_DISTANCE\n # 西偏北\n elif direction_flag == 3:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s10_target * math.sin(math.radians(target_direction))\n longitude_change_distance = distance_s10_target * math.cos(math.radians(target_direction))\n # 计算目标纬度和经度\n s10_target_latitude = station10_latitude + latitude_change_distance / longitude_distance\n s10_target_longitude = station10_longitude - longitude_change_distance / LATITUDE_DISTANCE\n\n print(\"获取主站角度{}°\\n目标在主站 {}{}° 方向,距离 {} 米\".format(station10_angle, direction_Zh[direction_flag],\n target_direction,\n distance_s10_target))\n # 统一极坐标系\n azimuth = (station8_angle + station8_correction) % 360\n # print(azimuth)\n # 通过方位角判断方向\n if 0 < azimuth <= 90:\n direction_flag = 0\n target_direction = azimuth\n elif 90 < azimuth <= 180:\n direction_flag = 1\n target_direction = azimuth - 90\n elif 180 < azimuth <= 270:\n direction_flag = 2\n target_direction = azimuth - 180\n elif 270 < azimuth <= 360:\n direction_flag = 3\n target_direction = azimuth - 270\n\n # 北偏东\n if direction_flag == 0:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s8_target * math.cos(math.radians(target_direction))\n longitude_change_distance = distance_s8_target * math.sin(math.radians(target_direction))\n # 计算目标纬度和经度\n s8_target_latitude = station8_latitude + latitude_change_distance / longitude_distance\n s8_target_longitude = station8_longitude + longitude_change_distance / LATITUDE_DISTANCE\n # 东偏南\n elif direction_flag == 1:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s8_target * math.sin(math.radians(target_direction))\n longitude_change_distance = distance_s8_target * math.cos(math.radians(target_direction))\n # 计算目标纬度和经度\n s8_target_latitude = station8_latitude - latitude_change_distance / longitude_distance\n s8_target_longitude = station8_longitude + longitude_change_distance / LATITUDE_DISTANCE\n # 南偏西\n elif direction_flag == 2:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s8_target * math.cos(math.radians(target_direction))\n longitude_change_distance = distance_s8_target * math.sin(math.radians(target_direction))\n # 计算目标纬度和经度\n s8_target_latitude = station8_latitude - latitude_change_distance / longitude_distance\n s8_target_longitude = station8_longitude - longitude_change_distance / LATITUDE_DISTANCE\n # 西偏北\n elif direction_flag == 3:\n # 计算纬度方向和经度方向的长度改变量\n latitude_change_distance = distance_s8_target * math.sin(math.radians(target_direction))\n 
longitude_change_distance = distance_s8_target * math.cos(math.radians(target_direction))\n # 计算目标纬度和经度\n s8_target_latitude = station8_latitude + latitude_change_distance / longitude_distance\n s8_target_longitude = station8_longitude - longitude_change_distance / LATITUDE_DISTANCE\n print(\"获取一站角度{}°\\n目标在一站 {}{}° 方向,距离 {} 米\".format(station8_angle, direction_Zh[direction_flag], target_direction,\n distance_s8_target))\n\n target_latitude = (s10_target_latitude + s8_target_latitude) / 2\n target_longitude = (s10_target_longitude + s8_target_longitude) / 2\n print(\"目标WGS84坐标({},{})\".format(target_latitude, target_longitude))\n temp = time.strftime(\"%H%M%S\")\n write_csv(temp, target_latitude, target_longitude)\n return target_latitude, target_longitude\n\n\ndef USB_recv(ser, usb_data_len):\n # 解析数据\n USB_recv_data = ((ser.read(usb_data_len)).decode('ASCII')).replace(\"\\r\\n\", \"\")\n USB_recv_data = bytes(USB_recv_data, encoding=\"utf8\")\n data = [int((\"0x\" + (USB_recv_data[0: 16].decode())), 16), int((\"0x\" + (USB_recv_data[16: 32].decode())), 16),\n int((\"0x\" + (USB_recv_data[32: 48].decode())), 16), int((\"0x\" + (USB_recv_data[48: 64].decode())), 16)]\n return data\n\n\ndef udp_send_ersuo(udp_socket, BetaAngle):\n mhesIPAddr = '192.168.3.70' # 民航二所IP\n mhesPort = 10002 # 民航二所Port\n\n send_data_head = 0xb3b3\n send_data_headlen = 34\n send_data_latitude = 103 + (45 / 60) / 100 + (20 / 3600) / 10000\n send_data_longitude = 31 + (7 / 60) / 100 + (2 / 3600) / 10000\n send_data_height = 790\n send_data_tarqua = 1\n send_data_end = 0xb1af\n send_data_tracknum = 1\n send_data_trackdis = 0\n send_data_bata = BetaAngle\n send_data_alpha = 0\n send_data_trackrate = 0\n send_data = struct.pack('<HHdddIHiffff', send_data_head, send_data_headlen, send_data_latitude,\n send_data_longitude, send_data_height, send_data_tarqua, send_data_end,\n send_data_tracknum, send_data_trackdis, send_data_bata, send_data_alpha,\n send_data_trackrate)\n\n print(len(send_data))\n # input('enter')\n # send_data = send_data.encode('utf-8') #这行代码需要测试 一达测试版本中没有这一行\n udp_socket.sendto(send_data, (mhesIPAddr, mhesPort)) # sendto(发送数据,发送地址)\n # udp_socket.close()\n\n\ndef udp_recv(udp_socket):\n receive_message, client = udp_socket.recvfrom(4096)\n # data = struct.unpack('<4q', receive_message) # 调用struct库解包,>4q代表4个long long 大端对齐<4q代表4个long long 小端对齐\n data = struct.unpack('<HHdddIHiffff', receive_message) # 调用struct库解包,>4q代表4个long long 大端对齐<4q代表4个long long 小端对齐\n warning = \"板子是小端,网络调试助手是大端!!!\"\n return data[9]\n\n\ndef main():\n init()\n fetch_data()\n input(\"Press any key to start.......\")\n print(\"Waiting for data......\")\n global count\n count = 0\n while True:\n count += 1\n decoded_data = USB_recv(ser, usb_data_len)\n print(\"Raw_CH1_data:{:.15f}\\nRaw_CH2_data:{:.15f}\\nRaw_CH3_data:{:.15f}\\nRaw_CH4_data:{:.15f}\".format(\n decoded_data[0], decoded_data[1], decoded_data[2], decoded_data[3]))\n s10_matched_angle = SelectAngle(decoded_data[0], decoded_data[1], decoded_data[2], decoded_data[3])\n print('s10_matched_angle:', s10_matched_angle)\n # # 求平均,平滑数据\n # matched_angle_buff = []\n # matched_angle_buff.append(s10_matched_angle)\n # if len(matched_angle_buff) >= 5:\n # matched_angle_mid = sum(matched_angle_buff) / (len(matched_angle_buff) * 1.0)\n # matched_angle_buff = [] # 覆盖为一个没有元素的列表\n \"\"\"\n udp发送函数\n \"\"\"\n # udp_send_ersuo(udp_socket, matched_angle_mid)\n Print4(count, decoded_data[0], decoded_data[1], decoded_data[2], decoded_data[3], s10_matched_angle)\n 
print(\"当前角度为:{}\".format(s10_matched_angle))\n \"\"\"\n 解算函数\n \"\"\"\n s8_matched_angle = udp_recv(udp_socket)\n positioning(s10_matched_angle, s8_matched_angle, s10_correction_angle, s8_correction_angle, station10_latitude,\n station10_longitude)\n print(\"\\n\\n\")\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel"
]
] |
bigTailHao/mmaction2
|
[
"32826c99914bc99ced467a81d3dedda43558b9e2"
] |
[
"demo/demo.py"
] |
[
"import argparse\n\nimport torch\n\nfrom mmaction.apis import inference_recognizer, init_recognizer\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMAction2 demo')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('video', help='video file')\n parser.add_argument('label', help='label file')\n parser.add_argument(\n '--device', type=str, default='cuda:0', help='CPU/CUDA device option')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n # assign the desired device.\n device = torch.device(args.device)\n # build the recognizer from a config file and checkpoint file\n model = init_recognizer(args.config, args.checkpoint, device=device)\n # test a single video\n results = inference_recognizer(model, args.video, args.label)\n\n print('The top-5 labels with corresponding scores are:')\n for result in results:\n print(f'{result[0]}: ', result[1])\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.device"
]
] |
katsugeneration/social-dashboard
|
[
"ab55935aa82a8aeab057cbf196f3b86e6158c8f3"
] |
[
"runner/jasso-gakuseiseikatsu-stats-importer/main.py"
] |
[
"from typing import Optional\nimport os\nimport requests\nimport io\nimport pandas as pd\nfrom google.cloud import storage\nfrom google.cloud import datacatalog_v1\nfrom google.cloud import bigquery\nfrom fastapi import FastAPI, Response\nfrom pydantic import BaseModel\n\ntry:\n from utils.clients import data_catalog\nexcept: # noqa E722\n from ..utils.clients import data_catalog\n\napp = FastAPI(debug=False)\n\n\nclass RequestPayload(BaseModel):\n target_year: int\n target_path: str\n\n\ndef load_stats_raw(\n storage_client: storage.Client,\n datacatalog: data_catalog.Client,\n region: str,\n entry_group: datacatalog_v1.EntryGroup,\n tag_template: datacatalog_v1.TagTemplate,\n target_year: int,\n target_path: str,\n) -> Optional[io.BytesIO]:\n bucket_name = \"jasso-gakuseiseikatsu-stats-raw\"\n bucket = storage_client.bucket(bucket_name)\n if not bucket.exists():\n bucket.create(location=region)\n\n entry_id = \"jasso_gakuseiseikatsu_stats_raw\"\n entry = datacatalog.get_entry(entry_group, entry_id)\n if entry is None:\n entry = datacatalog_v1.types.Entry()\n entry.display_name = bucket_name\n entry.gcs_fileset_spec.file_patterns.append(f\"gs://{bucket_name}/*.xlsx\")\n entry.type_ = datacatalog_v1.EntryType.FILESET\n entry = datacatalog.create_entry(entry_group, entry_id, entry)\n\n tag = datacatalog.get_tag(entry)\n if tag is None:\n tag = datacatalog_v1.types.Tag()\n tag.template = tag_template.name\n tag.fields[\"data_sources\"] = datacatalog_v1.types.TagField()\n tag.fields[\n \"data_sources\"\n ].string_value = \"JASSO学生生活調査 https://www.jasso.go.jp/about/statistics/gakusei_chosa/index.html\"\n tag.fields[\"license\"] = datacatalog_v1.types.TagField()\n tag.fields[\n \"license\"\n ].string_value = \"日本学生支援機構が「学生生活調査」「高等専門学校生生活調査」「専修学校生生活調査」の結果として公開している情報は、出典の記載をしていただいた上で、どなたでも自由に利用できます。 https://www.jasso.go.jp/about/statistics/gakusei_chosa/riyou.html\"\n tag = datacatalog.create_tag(entry, tag=tag)\n tag = datacatalog.set_status_running(tag)\n\n raw_dir = bucket.blob(f\"data_{target_year}.xlsx\")\n res = requests.get(\n target_path,\n )\n content = None\n if res.status_code == 200:\n content = io.BytesIO(res.content)\n res.close()\n raw_dir.upload_from_file(content)\n tag = datacatalog.set_status_completed(tag)\n\n return content\n\n\ndef load_income_stats(\n storage_client: storage.Client,\n datacatalog: data_catalog.Client,\n region: str,\n entry_group: datacatalog_v1.EntryGroup,\n tag_template: datacatalog_v1.TagTemplate,\n content: Optional[io.BytesIO],\n target_year: int,\n) -> pd.DataFrame:\n\n bucket_name = \"jasso-gakuseiseikatsu-stats-annual-income-divide-university\"\n\n entry_id = \"jasso_gakuseiseikatsu_stats_annual_income_divide_university\"\n entry = datacatalog.get_entry(entry_group, entry_id)\n if entry is None:\n entry = datacatalog_v1.types.Entry()\n entry.display_name = bucket_name\n entry.gcs_fileset_spec.file_patterns.append(f\"gs://{bucket_name}/*.parquet\")\n entry.type_ = datacatalog_v1.EntryType.FILESET\n\n columns = []\n columns.append(\n datacatalog_v1.types.ColumnSchema(\n column=\"year\",\n type_=\"INTEGER\",\n mode=\"REQUIRED\",\n description=\"Data Ingestion Year\",\n )\n )\n\n columns.append(\n datacatalog_v1.types.ColumnSchema(\n column=\"sex\", type_=\"STRING\", mode=\"REQUIRED\", description=\"Sex Type\"\n )\n )\n\n columns.append(\n datacatalog_v1.types.ColumnSchema(\n column=\"university_type\",\n type_=\"STRING\",\n mode=\"REQUIRED\",\n description=\"University Type\",\n )\n )\n\n columns.append(\n datacatalog_v1.types.ColumnSchema(\n 
column=\"range\",\n type_=\"STRING\",\n mode=\"REQUIRED\",\n description=\"Income Range\",\n )\n )\n\n columns.append(\n datacatalog_v1.types.ColumnSchema(\n column=\"rate\",\n type_=\"DOUBLE\",\n mode=\"REQUIRED\",\n description=\"Income Range Rate at specfic Sex and University type\",\n )\n )\n\n entry.schema.columns.extend(columns)\n entry = datacatalog.create_entry(entry_group, entry_id, entry)\n\n tag = datacatalog.get_tag(entry)\n if tag is None:\n tag = datacatalog_v1.types.Tag()\n tag.template = tag_template.name\n tag.fields[\"data_sources\"] = datacatalog_v1.types.TagField()\n tag.fields[\n \"data_sources\"\n ].string_value = (\n \"gs://jasso-gakuseiseikatsu-stats-annual-income-divide-university\"\n )\n tag.fields[\"license\"] = datacatalog_v1.types.TagField()\n tag.fields[\n \"license\"\n ].string_value = \"日本学生支援機構が「学生生活調査」「高等専門学校生生活調査」「専修学校生生活調査」の結果として公開している情報は、出典の記載をしていただいた上で、どなたでも自由に利用できます。 https://www.jasso.go.jp/about/statistics/gakusei_chosa/riyou.html\"\n tag = datacatalog.create_tag(entry, tag=tag)\n tag = datacatalog.set_status_running(tag)\n\n df = pd.read_excel(content, sheet_name=8)\n data = []\n row_num = {\n \"男\": {\n \"国立\": 4,\n \"公立\": 5,\n \"私立\": 6,\n },\n \"女\": {\n \"国立\": 7,\n \"公立\": 8,\n \"私立\": 9,\n },\n \"平均\": {\n \"国立\": 11,\n \"公立\": 13,\n \"私立\": 15,\n \"平均\": 17,\n },\n }\n df.iloc[2] = df.iloc[2].str.replace(\"\\s\", \"\", regex=True)\n label_first_index = 0\n while True:\n label = df.iloc[2, label_first_index]\n if label == \"200万円未満\":\n break\n label_first_index += 1\n for s in [\"男\", \"女\", \"平均\"]:\n for g in [\"国立\", \"公立\", \"私立\", \"平均\"]:\n if g not in row_num[s]:\n continue\n ic = row_num[s][g]\n for i in range(label_first_index, label_first_index+15):\n k = df.iloc[2, i]\n data.append((s, g, k, df.iloc[ic, i]))\n\n bucket = storage_client.bucket(bucket_name)\n if not bucket.exists():\n bucket.create(location=region)\n raw_dir = bucket.blob(f\"year={target_year}/data.parquet\")\n\n res = pd.DataFrame(data, columns=[\"sex\", \"university_type\", \"range\", \"rate\"])\n content = io.BytesIO()\n res.to_parquet(content)\n content.seek(0)\n raw_dir.upload_from_file(content)\n tag = datacatalog.set_status_completed(tag)\n\n\ndef create_external_table(project_id: str) -> None:\n client = bigquery.Client()\n dataset_id = \"social_dataset\"\n dataset_ref = bigquery.DatasetReference(project_id, dataset_id)\n\n table_id = \"jasso_gakuseiseikatsu_stats_annual_income_divide_university\"\n table = bigquery.Table(dataset_ref.table(table_id))\n\n external_config = bigquery.ExternalConfig(\"PARQUET\")\n external_config.source_uris = [\n \"gs://jasso-gakuseiseikatsu-stats-annual-income-divide-university/*\"\n ]\n external_config.autodetect = True\n hive_partitioning = bigquery.external_config.HivePartitioningOptions()\n hive_partitioning.mode = \"AUTO\"\n hive_partitioning.require_partition_filter = False\n hive_partitioning.source_uri_prefix = (\n \"gs://jasso-gakuseiseikatsu-stats-annual-income-divide-university\"\n )\n external_config.hive_partitioning = hive_partitioning\n table.external_data_configuration = external_config\n\n table = client.create_table(table, exists_ok=True)\n\n\[email protected](\"/\")\ndef handler(payload: RequestPayload):\n storage_client = storage.Client()\n\n # The name for the new bucket\n project_id = os.environ.get(\"GOOGLE_CLOUD_PROJECT\")\n region = os.environ.get(\"GCLOUD_REGION\")\n target_year = payload.target_year\n target_path = payload.target_path\n\n datacatalog = data_catalog.Client(project_id, region)\n 
entry_group_id = \"social_data\"\n tag_template_id = \"data_ingestion\"\n entry_group = datacatalog.get_entry_group(entry_group_id)\n tag_template = datacatalog.get_tag_template(tag_template_id)\n\n content = load_stats_raw(\n storage_client,\n datacatalog,\n region,\n entry_group,\n tag_template,\n target_year,\n target_path,\n )\n\n load_income_stats(\n storage_client,\n datacatalog,\n region,\n entry_group,\n tag_template,\n content,\n target_year,\n )\n\n create_external_table(project_id)\n\n return Response(status_code=200)\n"
] |
[
[
"pandas.read_excel",
"pandas.DataFrame"
]
] |
UVA-DSI-2019-Capstones/ARL
|
[
"74d43bf975db90da70696d4b68002d6971164840"
] |
[
"db/trainee_identifier_test.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport sqlite3\nfrom sqlite3 import Error\n\npath = '../mechanical_turk/Dialogue.xlsx'\ndme = pd.ExcelFile(path)\ndme_scores = dme.parse('DME Score')\ndme_sheet = dme.parse('DME')\n\nidentifiers = []\ntrainee_response_data = []\nfeedbacks = []\nn = 1\n\nfor index, row in dme_scores.iterrows():\n avatar_prompt_id = n\n n = n + 1\n identifier = row['Identifier']\n response_text = row['Dialogue Text']\n response_score = row['Average']\n sheet_row = dme_sheet[dme_sheet['Identifier'] == identifier]\n\n if sheet_row is None or sheet_row['Feedback']is None or sheet_row['Feedback'].empty:\n feedback = ''\n else:\n feedback = sheet_row['Feedback'].values[0]\n comment = ''\n trainee_response_data.append((avatar_prompt_id, identifier, response_text, round(float(response_score), 2), feedback, comment))\n\ndef create_connection(db_file):\n try:\n conn = sqlite3.connect(db_file)\n return(conn)\n except Error as e:\n print(e)\n\ndb = create_connection(\"database.db\")\nc = db.cursor()\nc.executemany('insert into trainee_response(avatar_prompt_id, identifier, response_text, response_score, response_feedback, comment) values (?,?,?,?,?,?)', trainee_response_data)\n\ndb.commit()\ndb.close()"
] |
[
[
"pandas.ExcelFile"
]
] |
KaiLiCn/hpv_integration
|
[
"592a99edc57f6ff4b3fe0f0169ada1b7340d19d5"
] |
[
"get_data.py"
] |
[
"import pandas as pd\n\n\ndef inergrate_data(insertion_file):\n insertion_data = pd.read_csv(insertion_file)\n\n insertion_data = insertion_data.drop(insertion_data.columns[0], axis=1).drop_duplicates()\n\n sample_list_datatable = insertion_data.drop_duplicates(subset=['sample', 'gene'])[['sample', 'gene']]\n\n return sample_list_datatable, insertion_data\n"
] |
[
[
"pandas.read_csv"
]
] |
benji1123/modelE
|
[
"cc21d692693c5cb1f8d504ee385a2035a854a1c8"
] |
[
"Raspberry Pi/LaneDetection/RPilaneDetection.py"
] |
[
"'''\nSPEED TEST\n---------\n- Dell XPS 13: \n - 20 fps with native webcam res \n - 18 fps with enforced 640x420 res\n- Raspberry Pi: \n'''\nimport numpy as np\nimport logging\nimport math\nimport datetime\nimport sys\nimport cv2\nimport time\n\n_SHOW_IMAGE = False\n_ENFORCE_RESOLUTION = True\n_KEEP_RUNNING = True\n\nCAM_WIDTH = 160\nCAM_HEIGHT = 120\nNUM_FRAMES = 100 # process this many frames before quitting program\n\n###\n\nclass HandCodedLaneFollower(object):\n \n # CONSTRUCTOR\n def __init__(self, car=None):\n logging.info('Creating a HandCodedLaneFollower...')\n self.car = car\n self.curr_steering_angle = 90\n\n # FINDS & PROCESSES LANE-LINES\n def follow_lane(self, frame):\n # Main entry point of the lane follower\n show_image(\"orig\", frame)\n lane_lines_arr, frame = detect_lane(frame)\n final_frame = self.steer(frame, lane_lines_arr)\n return final_frame\n \n # COMPUTES STEERING DIRECTION\n def steer(self, frame, lane_lines_arr):\n logging.debug('steering...')\n if len(lane_lines_arr) == 0:\n logging.error('No lane lines detected, nothing to do.')\n return frame\n \n # compute STEERING ANGLE\n new_steering_angle = compute_steering_angle(frame, lane_lines_arr)\n self.curr_steering_angle = stabilize_steering_angle(self.curr_steering_angle, new_steering_angle, len(lane_lines_arr))\n \n # Send steering-signal to Arduino\n if self.car is not None:\n self.car.front_wheels.turn(self.curr_steering_angle)\n curr_heading_image = display_heading_line(frame, self.curr_steering_angle)\n show_image(\"heading\", curr_heading_image)\n\n return curr_heading_image\n\n\n############################\n# Frame processing steps\n############################\n\n'''LANE-DETECTION MASTER'''\ndef detect_lane(frame):\n logging.debug('detecting lane lines...')\n # EDGE DETECTION\n edges = detect_edges(frame)\n show_image('edges', edges)\n # REMOVE IRRELEVANCIES\n cropped_edges = region_of_interest(edges)\n show_image('edges cropped', cropped_edges)\n # LINE-DETECTION\n line_segments_arr = detect_line_segments(cropped_edges)\n line_segment_image = display_lines(frame, line_segments_arr)\n show_image(\"line segments\", line_segment_image)\n # LANE-LINE CONSTRUCTION\n lane_lines_arr = average_slope_intercept(frame, line_segments_arr)\n lane_lines_image = display_lines(frame, lane_lines_arr)\n show_image(\"lane lines\", lane_lines_image)\n return lane_lines_arr, lane_lines_image\n\ndef detect_edges(frame):\n # only check pixels that're the color of lane-lines\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n show_image(\"hsv\", hsv)\n mask = cv2.inRange(hsv, np.array([30, 40, 0]) , np.array([150, 255, 255]))\n # detect the remaining edges in the image, including lane-lines\n edges = cv2.Canny(mask, 200, 400)\n show_image(\"blue mask\", mask)\n return edges\n\n# lane-lines are in bottom half of frame\ndef region_of_interest(canny):\n height, width = canny.shape\n mask = np.zeros_like(canny)\n # 4-corners of top-half\n polygon = np.array([[ \n (0, height * 1 / 2),\n (width, height * 1 / 2),\n (width, height),\n (0, height),\n ]], np.int32)\n # white-out top-half of frame\n cv2.fillPoly(mask, polygon, 255)\n masked_image = cv2.bitwise_and(canny, mask)\n show_image(\"mask\", mask)\n return masked_image\n\n# find LINES in frame\ndef detect_line_segments(cropped_edges):\n '''\n No mask will cleanly identify the 2 lane-lines. 
\n Instead, the mask detects a multiple lane-segments for each lane.\n '''\n # tuning min_threshold, minLineLength, maxLineGap is a trial and error process by hand\n rho = 1 # precision in pixel, i.e. 1 pixel\n angle = np.pi / 180 # degree in radian, i.e. 1 degree\n min_threshold = 10 # minimal of votes\n line_segments_arr = cv2.HoughLinesP(cropped_edges, rho, angle, min_threshold, np.array([]), minLineLength=8,\n maxLineGap=4)\n if line_segments_arr is not None:\n for line_segment in line_segments_arr:\n logging.debug('detected line_segment:')\n logging.debug(\"%s of length %s\" % (line_segment, length_of_line_segment(line_segment[0])))\n return line_segments_arr\n\n# derive LANE-LINES from identified line-segments\ndef average_slope_intercept(frame, line_segments_arr):\n \"\"\"\n This function combines line-segments into one or two lane lines\n If all line slopes are < 0: then we only have detected left lane\n If all line slopes are > 0: then we only have detected right lane\n \"\"\"\n lane_lines_arr = []\n if line_segments_arr is None:\n logging.info('No line_segment segments detected')\n return lane_lines_arr\n\n height, width, _ = frame.shape\n left_fit_segments = [] # create L-lane from scattered segments (see image in README.md)\n right_fit_segments = [] # create R-lane from scattered segments\n # (the line in the middle is artificially generated later on)\n \n # constrain detection of L & R lanes\n boundary = 1/3\n left_region_boundary = width * (1 - boundary) # L lane-line is on left 2/3 of screen\n right_region_boundary = width * boundary # R lane line is right 2/3 (the area outside this bound)\n\n # use SLOPE to differentite L&R lane-segments\n for line_segment in line_segments_arr:\n for x1, y1, x2, y2 in line_segment: \n if x1 == x2:\n logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)\n continue\n fit = np.polyfit((x1, x2), (y1, y2), 1)\n slope = fit[0]\n intercept = fit[1]\n # left lane\n if slope < 0:\n if x1 < left_region_boundary and x2 < left_region_boundary:\n left_fit_segments.append((slope, intercept))\n # right lane\n else:\n if x1 > right_region_boundary and x2 > right_region_boundary:\n right_fit_segments.append((slope, intercept))\n \n # construct lanes\n left_fit_average = np.average(left_fit_segments, axis=0)\n if len(left_fit_segments) > 0:\n lane_lines_arr.append(make_points(frame, left_fit_average))\n\n right_fit_average = np.average(right_fit_segments, axis=0)\n if len(right_fit_segments) > 0:\n lane_lines_arr.append(make_points(frame, right_fit_average))\n\n logging.debug('lane lines: %s' % lane_lines_arr) # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]\n\n return lane_lines_arr\n\n\n''' Steering Functionality '''\ndef compute_steering_angle(frame, lane_lines_arr):\n \"\"\" Find the steering angle based on lane line coordinate\n \n **************** We assume that camera is calibrated to point to dead center ****************\n \n \"\"\"\n if len(lane_lines_arr) == 0:\n logging.info('No lane lines detected')\n return -90\n\n height, width, _ = frame.shape\n if len(lane_lines_arr) == 1:\n logging.debug('Only detected one lane line, just follow it. 
%s' % lane_lines_arr[0])\n x1, _, x2, _ = lane_lines_arr[0][0]\n x_offset = x2 - x1\n else:\n _, _, left_x2, _ = lane_lines_arr[0][0]\n _, _, right_x2, _ = lane_lines_arr[1][0]\n camera_mid_offset_percent = 0.02 # 0.0 means car pointing to center, -0.03: car is centered to left, +0.03 means car pointing to right\n mid = int(width / 2 * (1 + camera_mid_offset_percent))\n x_offset = (left_x2 + right_x2) / 2 - mid\n\n # find the steering angle, which is angle between navigation direction to end of center line\n y_offset = int(height / 2)\n\n angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line\n angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line\n steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel\n\n logging.debug('new steering angle: %s' % steering_angle)\n return steering_angle\n\ndef stabilize_steering_angle(curr_steering_angle, new_steering_angle, num_of_lane_lines, max_angle_deviation_two_lines=5, max_angle_deviation_one_lane=1):\n \"\"\"\n Using last steering angle to stabilize the steering angle\n This can be improved to use last N angles, etc\n if new angle is too different from current angle, only turn by max_angle_deviation degrees\n \"\"\"\n if num_of_lane_lines == 2 :\n # if both lane lines detected, then we can deviate more\n max_angle_deviation = max_angle_deviation_two_lines\n else :\n # if only one lane detected, don't deviate too much\n max_angle_deviation = max_angle_deviation_one_lane\n \n angle_deviation = new_steering_angle - curr_steering_angle\n if abs(angle_deviation) > max_angle_deviation:\n stabilized_steering_angle = int(curr_steering_angle\n + max_angle_deviation * angle_deviation / abs(angle_deviation))\n else:\n stabilized_steering_angle = new_steering_angle\n logging.info('Proposed angle: %s, stabilized angle: %s' % (new_steering_angle, stabilized_steering_angle))\n return stabilized_steering_angle\n\n\n############################\n# Utility Functions\n############################\ndef display_lines(frame, lines, line_color=(0, 255, 0), line_width=10):\n line_image = np.zeros_like(frame)\n if lines is not None:\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(line_image, (x1, y1), (x2, y2), line_color, line_width)\n line_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)\n return line_image\n\n\ndef display_heading_line(frame, steering_angle, line_color=(0, 0, 255), line_width=5, ):\n heading_image = np.zeros_like(frame)\n height, width, _ = frame.shape\n\n # figure out the heading line from steering angle\n # heading line (x1,y1) is always center bottom of the screen\n # (x2, y2) requires a bit of trigonometry\n\n # Note: the steering angle of:\n # 0-89 degree: turn left\n # 90 degree: going straight\n # 91-180 degree: turn right \n steering_angle_radian = steering_angle / 180.0 * math.pi\n x1 = int(width / 2)\n y1 = height\n x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))\n y2 = int(height / 2)\n\n cv2.line(heading_image, (x1, y1), (x2, y2), line_color, line_width)\n heading_image = cv2.addWeighted(frame, 0.8, heading_image, 1, 1)\n\n return heading_image\n\ndef length_of_line_segment(line):\n x1, y1, x2, y2 = line\n return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\ndef show_image(title, frame, show=_SHOW_IMAGE):\n if show:\n cv2.imshow(title, frame)\n\ndef make_points(frame, line):\n height, width, _ = frame.shape\n slope, intercept = line\n y1 = height # bottom of the frame\n y2 = 
int(y1 * 1 / 2) # make points from middle of the frame down\n\n # bound the coordinates within the frame\n x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))\n x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))\n return [[x1, y1, x2, y2]]\n\n############################\n# Test Functions\n############################\ndef test_photo(file):\n land_follower = HandCodedLaneFollower()\n frame = cv2.imread(file)\n combo_image = land_follower.follow_lane(frame)\n show_image('final', combo_image, True)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef getFPS(start, numFrames):\n end = time.time()\n elapsedSeconds = end - start\n print(\"Time taken : {0} seconds\".format(elapsedSeconds))\n fps = numFrames/elapsedSeconds\n print(\"Estimated FPS: {0}\".format(fps))\n\n\ndef test_video(video_file):\n lane_follower = HandCodedLaneFollower()\n # cap = cv2.VideoCapture(video_file + '.avi')\n cap = cv2.VideoCapture(0)\n \n # lower RESOLUTION for faster processing [https://picamera.readthedocs.io/en/release-1.12/fov.html]\n if _ENFORCE_RESOLUTION:\n cap.set(cv2.CAP_PROP_FRAME_WIDTH,CAM_WIDTH);\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT,CAM_HEIGHT);\n\n # skip first second of video.\n for i in range(3):\n _, frame = cap.read()\n\n video_type = cv2.VideoWriter_fourcc(*'XVID')\n video_overlay = cv2.VideoWriter(\"%s_overlay.avi\" % (video_file), video_type, 20.0, (320, 240))\n\n # make a timer for FPS computation\n start = time.time()\n end = 0\n try:\n i = 0\n while cap.isOpened():\n _, frame = cap.read()\n print('frame %s' % i )\n combo_image= lane_follower.follow_lane(frame)\n cv2.imwrite(\"%s_%03d_%03d.png\" % (video_file, i, lane_follower.curr_steering_angle), frame)\n cv2.imwrite(\"%s_overlay_%03d.png\" % (video_file, i), combo_image)\n video_overlay.write(combo_image)\n cv2.imshow(\"Road with Lane line\", combo_image)\n i += 1\n\n if not _KEEP_RUNNING:\n if i >= NUM_FRAMES:\n getFPS(start, i)\n break\n\n # press 'q' to quit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n # print overall FPS\n getFPS(start, i)\n break\n finally:\n cap.release()\n video_overlay.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n test_video('/home/pi/DeepPiCar/driver/data/tmp/video01')\n # test_photo('images/test.png')\n #test_photo(sys.argv[1])\n #test_video(sys.argv[1])"
] |
[
[
"numpy.polyfit",
"numpy.array",
"numpy.zeros_like",
"numpy.average"
]
] |
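The lane-averaging step in the entry above reduces to fitting each Hough segment with a degree-1 np.polyfit and averaging the (slope, intercept) pairs per side. The sketch below reproduces just that numeric core on hand-made segments, with no OpenCV dependency; the segment coordinates are invented and the region-boundary filtering from the entry is omitted.

import numpy as np

# Hand-made segments in (x1, y1, x2, y2) form; in image coordinates a negative
# slope roughly corresponds to the left lane line, a positive slope to the right.
segments = np.array([
    [10, 100, 60, 40],
    [12, 105, 58, 45],
    [200, 40, 250, 100],
])

left_fits, right_fits = [], []
for x1, y1, x2, y2 in segments:
    if x1 == x2:
        continue  # skip vertical segments (infinite slope)
    slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
    (left_fits if slope < 0 else right_fits).append((slope, intercept))

left_lane = np.average(left_fits, axis=0) if left_fits else None
right_lane = np.average(right_fits, axis=0) if right_fits else None
print(left_lane, right_lane)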
ahmorsi/models
|
[
"074847040998cb14fc1d21e4757c5a2e82cac3aa"
] |
[
"official/nlp/data/question_answering_dataloader.py"
] |
[
"# Lint as: python3\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Loads dataset for the question answering (e.g, SQuAD) task.\"\"\"\nfrom typing import Mapping, Optional\nimport dataclasses\nimport tensorflow as tf\n\nfrom official.core import input_reader\nfrom official.modeling.hyperparams import config_definitions as cfg\nfrom official.nlp.data import data_loader_factory\n\n\[email protected]\nclass QADataConfig(cfg.DataConfig):\n \"\"\"Data config for question answering task (tasks/question_answering).\"\"\"\n input_path: str = ''\n global_batch_size: int = 48\n is_training: bool = True\n seq_length: int = 384\n # Settings below are question answering specific.\n version_2_with_negative: bool = False\n # Settings below are only used for eval mode.\n input_preprocessed_data_path: str = ''\n doc_stride: int = 128\n query_length: int = 64\n vocab_file: str = ''\n tokenization: str = 'WordPiece' # WordPiece or SentencePiece\n do_lower_case: bool = True\n\n\n@data_loader_factory.register_data_loader_cls(QADataConfig)\nclass QuestionAnsweringDataLoader:\n \"\"\"A class to load dataset for sentence prediction (classification) task.\"\"\"\n\n def __init__(self, params):\n self._params = params\n self._seq_length = params.seq_length\n self._is_training = params.is_training\n\n def _decode(self, record: tf.Tensor):\n \"\"\"Decodes a serialized tf.Example.\"\"\"\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),\n 'input_mask': tf.io.FixedLenFeature([self._seq_length], tf.int64),\n 'segment_ids': tf.io.FixedLenFeature([self._seq_length], tf.int64),\n }\n if self._is_training:\n name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)\n name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)\n else:\n name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64)\n example = tf.io.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in example:\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n return example\n\n def _parse(self, record: Mapping[str, tf.Tensor]):\n \"\"\"Parses raw tensors into a dict of tensors to be consumed by the model.\"\"\"\n x, y = {}, {}\n for name, tensor in record.items():\n if name in ('start_positions', 'end_positions'):\n y[name] = tensor\n elif name == 'input_ids':\n x['input_word_ids'] = tensor\n elif name == 'segment_ids':\n x['input_type_ids'] = tensor\n else:\n x[name] = tensor\n return (x, y)\n\n def load(self, input_context: Optional[tf.distribute.InputContext] = None):\n \"\"\"Returns a tf.dataset.Dataset.\"\"\"\n reader = input_reader.InputReader(\n params=self._params, decoder_fn=self._decode, parser_fn=self._parse)\n return reader.read(input_context)\n"
] |
[
[
"tensorflow.io.FixedLenFeature",
"tensorflow.io.parse_single_example",
"tensorflow.cast"
]
] |
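The _decode method in the data-loader entry above is a standard tf.train.Example round trip: fixed-length int64 features parsed with tf.io.parse_single_example and then cast to int32 for TPU compatibility. A minimal stand-alone version, using a toy 4-token sequence instead of the real 384-length SQuAD features:

import tensorflow as tf

seq_length = 4  # toy value; the entry uses seq_length = 384
example = tf.train.Example(features=tf.train.Features(feature={
    'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=[101, 7592, 999, 102])),
    'input_mask': tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 1, 1, 1])),
}))
serialized = example.SerializeToString()

name_to_features = {
    'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
    'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
}
parsed = tf.io.parse_single_example(serialized, name_to_features)
# tf.Example only stores int64; cast to int32 as the entry does for TPUs.
parsed = {name: tf.cast(t, tf.int32) if t.dtype == tf.int64 else t
          for name, t in parsed.items()}
print(parsed['input_ids'])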
rsumner31/Detectron
|
[
"021685d42f7e8ac097e2bcf79fecb645f211378e"
] |
[
"lib/core/config.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n#\n# Based on:\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\n\"\"\"Detectron config system.\n\nThis file specifies default config options for Detectron. You should not\nchange values in this file. Instead, you should write a config file (in yaml)\nand use merge_cfg_from_file(yaml_file) to load it and override the default\noptions.\n\nMost tools in the tools directory take a --cfg option to specify an override\nfile and an optional list of override (key, value) pairs:\n - See tools/{train,test}_net.py for example code that uses merge_cfg_from_file\n - See configs/*/*.yaml for example config files\n\nDetectron supports a lot of different model types, each of which has a lot of\ndifferent options. The result is a HUGE set of configuration options.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom ast import literal_eval\nfrom past.builtins import basestring\nfrom utils.collections import AttrDict\nimport copy\nimport logging\nimport numpy as np\nimport os\nimport os.path as osp\nimport yaml\n\nfrom utils.io import cache_url\n\nlogger = logging.getLogger(__name__)\n\n__C = AttrDict()\n# Consumers can get config by:\n# from core.config import cfg\ncfg = __C\n\n\n# Random note: avoid using '.ON' as a config key since yaml converts it to True;\n# prefer 'ENABLED' instead\n\n# ---------------------------------------------------------------------------- #\n# Training options\n# ---------------------------------------------------------------------------- #\n__C.TRAIN = AttrDict()\n\n# Initialize network with weights from this .pkl file\n__C.TRAIN.WEIGHTS = b''\n\n# Datasets to train on\n# Available dataset list: datasets.dataset_catalog.DATASETS.keys()\n# If multiple datasets are listed, the model is trained on their union\n__C.TRAIN.DATASETS = ()\n\n# Scales to use during training\n# Each scale is the pixel size of an image's shortest side\n# If multiple scales are listed, then one is selected uniformly at random for\n# each training image (i.e., scale jitter data augmentation)\n__C.TRAIN.SCALES = (600, )\n\n# Max pixel size of the longest side of a scaled input image\n__C.TRAIN.MAX_SIZE = 1000\n\n# Images *per GPU* in the training minibatch\n# Total images per minibatch = TRAIN.IMS_PER_BATCH * NUM_GPUS\n__C.TRAIN.IMS_PER_BATCH = 2\n\n# RoI minibatch size *per image* (number of regions of interest [ROIs])\n# Total number of RoIs per training minibatch =\n# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS\n# E.g., a common configuration is: 512 * 2 * 8 = 8192\n__C.TRAIN.BATCH_SIZE_PER_IM = 
64\n\n# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)\n__C.TRAIN.FG_FRACTION = 0.25\n\n# Overlap threshold for an RoI to be considered foreground (if >= FG_THRESH)\n__C.TRAIN.FG_THRESH = 0.5\n\n# Overlap threshold for an RoI to be considered background (class = 0 if\n# overlap in [LO, HI))\n__C.TRAIN.BG_THRESH_HI = 0.5\n__C.TRAIN.BG_THRESH_LO = 0.0\n\n# Use horizontally-flipped images during training?\n__C.TRAIN.USE_FLIPPED = True\n\n# Overlap required between an RoI and a ground-truth box in order for that\n# (RoI, gt box) pair to be used as a bounding-box regression training example\n__C.TRAIN.BBOX_THRESH = 0.5\n\n# Snapshot (model checkpoint) period\n# Divide by NUM_GPUS to determine actual period (e.g., 20000/8 => 2500 iters)\n# to allow for linear training schedule scaling\n__C.TRAIN.SNAPSHOT_ITERS = 20000\n\n# Train using these proposals\n# During training, all proposals specified in the file are used (no limit is\n# applied)\n# Proposal files must be in correspondence with the datasets listed in\n# TRAIN.DATASETS\n__C.TRAIN.PROPOSAL_FILES = ()\n\n# Make minibatches from images that have similar aspect ratios (i.e. both\n# tall and thin or both short and wide)\n# This feature is critical for saving memory (and makes training slightly\n# faster)\n__C.TRAIN.ASPECT_GROUPING = True\n\n# ---------------------------------------------------------------------------- #\n# RPN training options\n# ---------------------------------------------------------------------------- #\n\n# Minimum overlap required between an anchor and ground-truth box for the\n# (anchor, gt box) pair to be a positive example (IOU >= thresh ==> positive RPN\n# example)\n__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\n\n# Maximum overlap allowed between an anchor and ground-truth box for the\n# (anchor, gt box) pair to be a negative examples (IOU < thresh ==> negative RPN\n# example)\n__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\n\n# Target fraction of foreground (positive) examples per RPN minibatch\n__C.TRAIN.RPN_FG_FRACTION = 0.5\n\n# Total number of RPN examples per image\n__C.TRAIN.RPN_BATCH_SIZE_PER_IM = 256\n\n# NMS threshold used on RPN proposals (used during end-to-end training with RPN)\n__C.TRAIN.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring RPN proposals to keep before applying NMS\n# When FPN is used, this is *per FPN level* (not total)\n__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000\n\n# Number of top scoring RPN proposals to keep after applying NMS\n# This is the total number of RPN proposals produced (for both FPN and non-FPN\n# cases)\n__C.TRAIN.RPN_POST_NMS_TOP_N = 2000\n\n# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels\n# Set to -1 or a large value, e.g. 
100000, to disable pruning anchors\n__C.TRAIN.RPN_STRADDLE_THRESH = 0\n\n# Proposal height and width both need to be greater than RPN_MIN_SIZE\n# (at orig image scale; not scale used during training or inference)\n__C.TRAIN.RPN_MIN_SIZE = 0\n\n# Filter proposals that are inside of crowd regions by CROWD_FILTER_THRESH\n# \"Inside\" is measured as: proposal-with-crowd intersection area divided by\n# proposal area\n__C.TRAIN.CROWD_FILTER_THRESH = 0.7\n\n# Ignore ground-truth objects with area < this threshold\n__C.TRAIN.GT_MIN_AREA = -1\n\n# Freeze the backbone architecture during training if set to True\n__C.TRAIN.FREEZE_CONV_BODY = False\n\n# Training will resume from the latest snapshot (model checkpoint) found in the\n# output directory\n__C.TRAIN.AUTO_RESUME = True\n\n\n# ---------------------------------------------------------------------------- #\n# Data loader options\n# ---------------------------------------------------------------------------- #\n__C.DATA_LOADER = AttrDict()\n\n# Number of Python threads to use for the data loader (warning: using too many\n# threads can cause GIL-based interference with Python Ops leading to *slower*\n# training; 4 seems to be the sweet spot in our experience)\n__C.DATA_LOADER.NUM_THREADS = 4\n\n\n# ---------------------------------------------------------------------------- #\n# Inference ('test') options\n# ---------------------------------------------------------------------------- #\n__C.TEST = AttrDict()\n\n# Initialize network with weights from this .pkl file\n__C.TEST.WEIGHTS = b''\n\n# Datasets to test on\n# Available dataset list: datasets.dataset_catalog.DATASETS.keys()\n# If multiple datasets are listed, testing is performed on each one sequentially\n__C.TEST.DATASETS = ()\n\n# Scales to use during testing\n# Each scale is the pixel size of an image's shortest side\n# If multiple scales are given, then all scales are used as in multiscale\n# inference\n__C.TEST.SCALES = (600, )\n\n# Max pixel size of the longest side of a scaled input image\n__C.TEST.MAX_SIZE = 1000\n\n# Overlap threshold used for non-maximum suppression (suppress boxes with\n# IoU >= this threshold)\n__C.TEST.NMS = 0.3\n\n# Apply Fast R-CNN style bounding-box regression if True\n__C.TEST.BBOX_REG = True\n\n# Test using these proposal files (must correspond with TEST.DATASETS)\n__C.TEST.PROPOSAL_FILES = ()\n\n# Limit on the number of proposals per image used during inference\n__C.TEST.PROPOSAL_LIMIT = 2000\n\n# NMS threshold used on RPN proposals\n__C.TEST.RPN_NMS_THRESH = 0.7\n\n# Number of top scoring RPN proposals to keep before applying NMS\n# When FPN is used, this is *per FPN level* (not total)\n__C.TEST.RPN_PRE_NMS_TOP_N = 12000\n\n# Number of top scoring RPN proposals to keep after applying NMS\n# This is the total number of RPN proposals produced (for both FPN and non-FPN\n# cases)\n__C.TEST.RPN_POST_NMS_TOP_N = 2000\n\n# Proposal height and width both need to be greater than RPN_MIN_SIZE\n# (at orig image scale; not scale used during training or inference)\n__C.TEST.RPN_MIN_SIZE = 0\n\n# Maximum number of detections to return per image (100 is based on the limit\n# established for the COCO dataset)\n__C.TEST.DETECTIONS_PER_IM = 100\n\n# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to\n# balance obtaining high recall with not having too many low precision\n# detections that will slow down inference post processing steps (like NMS)\n__C.TEST.SCORE_THRESH = 0.05\n\n# Save detection results files if True\n# If false, results files 
are cleaned up (they can be large) after local\n# evaluation\n__C.TEST.COMPETITION_MODE = True\n\n# Evaluate detections with the COCO json dataset eval code even if it's not the\n# evaluation code for the dataset (e.g. evaluate PASCAL VOC results using the\n# COCO API to get COCO style AP on PASCAL VOC)\n__C.TEST.FORCE_JSON_DATASET_EVAL = False\n\n# [Inferred value; do not set directly in a config]\n# Indicates if precomputed proposals are used at test time\n# Not set for 1-stage models and 2-stage models with RPN subnetwork enabled\n__C.TEST.PRECOMPUTED_PROPOSALS = True\n\n# [Inferred value; do not set directly in a config]\n# Active dataset to test on\n__C.TEST.DATASET = b''\n\n# [Inferred value; do not set directly in a config]\n# Active proposal file to use\n__C.TEST.PROPOSAL_FILE = b''\n\n\n# ---------------------------------------------------------------------------- #\n# Test-time augmentations for bounding box detection\n# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_2x.yaml for an example\n# ---------------------------------------------------------------------------- #\n__C.TEST.BBOX_AUG = AttrDict()\n\n# Enable test-time augmentation for bounding box detection if True\n__C.TEST.BBOX_AUG.ENABLED = False\n\n# Heuristic used to combine predicted box scores\n# Valid options: ('ID', 'AVG', 'UNION')\n__C.TEST.BBOX_AUG.SCORE_HEUR = b'UNION'\n\n# Heuristic used to combine predicted box coordinates\n# Valid options: ('ID', 'AVG', 'UNION')\n__C.TEST.BBOX_AUG.COORD_HEUR = b'UNION'\n\n# Horizontal flip at the original scale (id transform)\n__C.TEST.BBOX_AUG.H_FLIP = False\n\n# Each scale is the pixel size of an image's shortest side\n__C.TEST.BBOX_AUG.SCALES = ()\n\n# Max pixel size of the longer side\n__C.TEST.BBOX_AUG.MAX_SIZE = 4000\n\n# Horizontal flip at each scale\n__C.TEST.BBOX_AUG.SCALE_H_FLIP = False\n\n# Apply scaling based on object size\n__C.TEST.BBOX_AUG.SCALE_SIZE_DEP = False\n__C.TEST.BBOX_AUG.AREA_TH_LO = 50**2\n__C.TEST.BBOX_AUG.AREA_TH_HI = 180**2\n\n# Each aspect ratio is relative to image width\n__C.TEST.BBOX_AUG.ASPECT_RATIOS = ()\n\n# Horizontal flip at each aspect ratio\n__C.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP = False\n\n# ---------------------------------------------------------------------------- #\n# Test-time augmentations for mask detection\n# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_2x.yaml for an example\n# ---------------------------------------------------------------------------- #\n__C.TEST.MASK_AUG = AttrDict()\n\n# Enable test-time augmentation for instance mask detection if True\n__C.TEST.MASK_AUG.ENABLED = False\n\n# Heuristic used to combine mask predictions\n# SOFT prefix indicates that the computation is performed on soft masks\n# Valid options: ('SOFT_AVG', 'SOFT_MAX', 'LOGIT_AVG')\n__C.TEST.MASK_AUG.HEUR = b'SOFT_AVG'\n\n# Horizontal flip at the original scale (id transform)\n__C.TEST.MASK_AUG.H_FLIP = False\n\n# Each scale is the pixel size of an image's shortest side\n__C.TEST.MASK_AUG.SCALES = ()\n\n# Max pixel size of the longer side\n__C.TEST.MASK_AUG.MAX_SIZE = 4000\n\n# Horizontal flip at each scale\n__C.TEST.MASK_AUG.SCALE_H_FLIP = False\n\n# Apply scaling based on object size\n__C.TEST.MASK_AUG.SCALE_SIZE_DEP = False\n__C.TEST.MASK_AUG.AREA_TH = 180**2\n\n# Each aspect ratio is relative to image width\n__C.TEST.MASK_AUG.ASPECT_RATIOS = ()\n\n# Horizontal flip at each aspect ratio\n__C.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP = False\n\n# ---------------------------------------------------------------------------- #\n# Test-augmentations 
for keypoints detection\n# configs/test_time_aug/keypoint_rcnn_R-50-FPN_1x.yaml\n# ---------------------------------------------------------------------------- #\n__C.TEST.KPS_AUG = AttrDict()\n\n# Enable test-time augmentation for keypoint detection if True\n__C.TEST.KPS_AUG.ENABLED = False\n\n# Heuristic used to combine keypoint predictions\n# Valid options: ('HM_AVG', 'HM_MAX')\n__C.TEST.KPS_AUG.HEUR = b'HM_AVG'\n\n# Horizontal flip at the original scale (id transform)\n__C.TEST.KPS_AUG.H_FLIP = False\n\n# Each scale is the pixel size of an image's shortest side\n__C.TEST.KPS_AUG.SCALES = ()\n\n# Max pixel size of the longer side\n__C.TEST.KPS_AUG.MAX_SIZE = 4000\n\n# Horizontal flip at each scale\n__C.TEST.KPS_AUG.SCALE_H_FLIP = False\n\n# Apply scaling based on object size\n__C.TEST.KPS_AUG.SCALE_SIZE_DEP = False\n__C.TEST.KPS_AUG.AREA_TH = 180**2\n\n# Eeach aspect ratio is realtive to image width\n__C.TEST.KPS_AUG.ASPECT_RATIOS = ()\n\n# Horizontal flip at each aspect ratio\n__C.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP = False\n\n# ---------------------------------------------------------------------------- #\n# Soft NMS\n# ---------------------------------------------------------------------------- #\n__C.TEST.SOFT_NMS = AttrDict()\n\n# Use soft NMS instead of standard NMS if set to True\n__C.TEST.SOFT_NMS.ENABLED = False\n# See soft NMS paper for definition of these options\n__C.TEST.SOFT_NMS.METHOD = b'linear'\n__C.TEST.SOFT_NMS.SIGMA = 0.5\n# For the soft NMS overlap threshold, we simply use TEST.NMS\n\n# ---------------------------------------------------------------------------- #\n# Bounding box voting (from the Multi-Region CNN paper)\n# ---------------------------------------------------------------------------- #\n__C.TEST.BBOX_VOTE = AttrDict()\n\n# Use box voting if set to True\n__C.TEST.BBOX_VOTE.ENABLED = False\n\n# We use TEST.NMS threshold for the NMS step. VOTE_TH overlap threshold\n# is used to select voting boxes (IoU >= VOTE_TH) for each box that survives NMS\n__C.TEST.BBOX_VOTE.VOTE_TH = 0.8\n\n# The method used to combine scores when doing bounding box voting\n# Valid options include ('ID', 'AVG', 'IOU_AVG', 'GENERALIZED_AVG', 'QUASI_SUM')\n__C.TEST.BBOX_VOTE.SCORING_METHOD = b'ID'\n\n# Hyperparameter used by the scoring method (it has different meanings for\n# different methods)\n__C.TEST.BBOX_VOTE.SCORING_METHOD_BETA = 1.0\n\n\n# ---------------------------------------------------------------------------- #\n# Model options\n# ---------------------------------------------------------------------------- #\n__C.MODEL = AttrDict()\n\n# The type of model to use\n# The string must match a function in the modeling.model_builder module\n# (e.g., 'generalized_rcnn', 'mask_rcnn', ...)\n__C.MODEL.TYPE = b''\n\n# The backbone conv body to use\n# The string must match a function that is imported in modeling.model_builder\n# (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN\n# backbone)\n__C.MODEL.CONV_BODY = b''\n\n# Number of classes in the dataset; must be set\n# E.g., 81 for COCO (80 foreground + 1 background)\n__C.MODEL.NUM_CLASSES = -1\n\n# Use a class agnostic bounding box regressor instead of the default per-class\n# regressor\n__C.MODEL.CLS_AGNOSTIC_BBOX_REG = False\n\n# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets\n# These are empirically chosen to approximately lead to unit variance targets\n__C.MODEL.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)\n\n# The meaning of FASTER_RCNN depends on the context (training vs. 
inference):\n# 1) During training, FASTER_RCNN = True means that end-to-end training will be\n# used to jointly train the RPN subnetwork and the Fast R-CNN subnetwork\n# (Faster R-CNN = RPN + Fast R-CNN).\n# 2) During inference, FASTER_RCNN = True means that the model's RPN subnetwork\n# will be used to generate proposals rather than relying on precomputed\n# proposals. Note that FASTER_RCNN = True can be used at inference time even\n# if the Faster R-CNN model was trained with stagewise training (which\n# consists of alternating between RPN and Fast R-CNN training in a way that\n# finally leads to a single network).\n__C.MODEL.FASTER_RCNN = False\n\n# Indicates the model makes instance mask predictions (as in Mask R-CNN)\n__C.MODEL.MASK_ON = False\n\n# Indicates the model makes keypoint predictions (as in Mask R-CNN for\n# keypoints)\n__C.MODEL.KEYPOINTS_ON = False\n\n# Indicates the model's computation terminates with the production of RPN\n# proposals (i.e., it outputs proposals ONLY, no actual object detections)\n__C.MODEL.RPN_ONLY = False\n\n# Caffe2 net execution type\n# Use 'prof_dag' to get profiling statistics\n__C.MODEL.EXECUTION_TYPE = b'dag'\n\n\n# ---------------------------------------------------------------------------- #\n# RetinaNet options\n# ---------------------------------------------------------------------------- #\n__C.RETINANET = AttrDict()\n\n# RetinaNet is used (instead of Fast/er/Mask R-CNN/R-FCN/RPN) if True\n__C.RETINANET.RETINANET_ON = False\n\n# Anchor aspect ratios to use\n__C.RETINANET.ASPECT_RATIOS = (0.5, 1.0, 2.0)\n\n# Anchor scales per octave\n__C.RETINANET.SCALES_PER_OCTAVE = 3\n\n# At each FPN level, we generate anchors based on their scale, aspect_ratio,\n# stride of the level, and we multiply the resulting anchor by ANCHOR_SCALE\n__C.RETINANET.ANCHOR_SCALE = 4\n\n# Convolutions to use in the cls and bbox tower\n# NOTE: this doesn't include the last conv for logits\n__C.RETINANET.NUM_CONVS = 4\n\n# Weight for bbox_regression loss\n__C.RETINANET.BBOX_REG_WEIGHT = 1.0\n\n# Smooth L1 loss beta for bbox regression\n__C.RETINANET.BBOX_REG_BETA = 0.11\n\n# During inference, #locs to select based on cls score before NMS is performed\n# per FPN level\n__C.RETINANET.PRE_NMS_TOP_N = 1000\n\n# IoU overlap ratio for labeling an anchor as positive\n# Anchors with >= iou overlap are labeled positive\n__C.RETINANET.POSITIVE_OVERLAP = 0.5\n\n# IoU overlap ratio for labeling an anchor as negative\n# Anchors with < iou overlap are labeled negative\n__C.RETINANET.NEGATIVE_OVERLAP = 0.4\n\n# Focal loss parameter: alpha\n__C.RETINANET.LOSS_ALPHA = 0.25\n\n# Focal loss parameter: gamma\n__C.RETINANET.LOSS_GAMMA = 2.0\n\n# Prior prob for the positives at the beginning of training. 
This is used to set\n# the bias init for the logits layer\n__C.RETINANET.PRIOR_PROB = 0.01\n\n# Whether classification and bbox branch tower should be shared or not\n__C.RETINANET.SHARE_CLS_BBOX_TOWER = False\n\n# Use class specific bounding box regression instead of the default class\n# agnostic regression\n__C.RETINANET.CLASS_SPECIFIC_BBOX = False\n\n# Whether softmax should be used in classification branch training\n__C.RETINANET.SOFTMAX = False\n\n# Inference cls score threshold, anchors with score > INFERENCE_TH are\n# considered for inference\n__C.RETINANET.INFERENCE_TH = 0.05\n\n\n# ---------------------------------------------------------------------------- #\n# Solver options\n# Note: all solver options are used exactly as specified; the implication is\n# that if you switch from training on 1 GPU to N GPUs, you MUST adjust the\n# solver configuration accordingly. We suggest using gradual warmup and the\n# linear learning rate scaling rule as described in\n# \"Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour\" Goyal et al.\n# https://arxiv.org/abs/1706.02677\n# ---------------------------------------------------------------------------- #\n__C.SOLVER = AttrDict()\n\n# Base learning rate for the specified schedule\n__C.SOLVER.BASE_LR = 0.001\n\n# Schedule type (see functions in utils.lr_policy for options)\n# E.g., 'step', 'steps_with_decay', ...\n__C.SOLVER.LR_POLICY = b'step'\n\n# Some LR Policies (by example):\n# 'step'\n# lr = SOLVER.BASE_LR * SOLVER.GAMMA ** (cur_iter // SOLVER.STEP_SIZE)\n# 'steps_with_decay'\n# SOLVER.STEPS = [0, 60000, 80000]\n# SOLVER.GAMMA = 0.1\n# lr = SOLVER.BASE_LR * SOLVER.GAMMA ** current_step\n# iters [0, 59999] are in current_step = 0, iters [60000, 79999] are in\n# current_step = 1, and so on\n# 'steps_with_lrs'\n# SOLVER.STEPS = [0, 60000, 80000]\n# SOLVER.LRS = [0.02, 0.002, 0.0002]\n# lr = LRS[current_step]\n\n# Hyperparameter used by the specified policy\n# For 'step', the current LR is multiplied by SOLVER.GAMMA at each step\n__C.SOLVER.GAMMA = 0.1\n\n# Uniform step size for 'steps' policy\n__C.SOLVER.STEP_SIZE = 30000\n\n# Non-uniform step iterations for 'steps_with_decay' or 'steps_with_lrs'\n# policies\n__C.SOLVER.STEPS = []\n\n# Learning rates to use with 'steps_with_lrs' policy\n__C.SOLVER.LRS = []\n\n# Maximum number of SGD iterations\n__C.SOLVER.MAX_ITER = 40000\n\n# Momentum to use with SGD\n__C.SOLVER.MOMENTUM = 0.9\n\n# L2 regularization hyperparameter\n__C.SOLVER.WEIGHT_DECAY = 0.0005\n\n# Warm up to SOLVER.BASE_LR over this number of SGD iterations\n__C.SOLVER.WARM_UP_ITERS = 500\n\n# Start the warm up from SOLVER.BASE_LR * SOLVER.WARM_UP_FACTOR\n__C.SOLVER.WARM_UP_FACTOR = 1.0 / 3.0\n\n# WARM_UP_METHOD can be either 'constant' or 'linear' (i.e., gradual)\n__C.SOLVER.WARM_UP_METHOD = 'linear'\n\n# Scale the momentum update history by new_lr / old_lr when updating the\n# learning rate (this is correct given MomentumSGDUpdateOp)\n__C.SOLVER.SCALE_MOMENTUM = True\n# Only apply the correction if the relative LR change exceeds this threshold\n# (prevents ever change in linear warm up from scaling the momentum by a tiny\n# amount; momentum scaling is only important if the LR change is large)\n__C.SOLVER.SCALE_MOMENTUM_THRESHOLD = 1.1\n\n# Suppress logging of changes to LR unless the relative change exceeds this\n# threshold (prevents linear warm up from spamming the training log)\n__C.SOLVER.LOG_LR_CHANGE_THRESHOLD = 1.1\n\n\n# ---------------------------------------------------------------------------- #\n# Fast R-CNN 
options\n# ---------------------------------------------------------------------------- #\n__C.FAST_RCNN = AttrDict()\n\n# The type of RoI head to use for bounding box classification and regression\n# The string must match a function this is imported in modeling.model_builder\n# (e.g., 'head_builder.add_roi_2mlp_head' to specify a two hidden layer MLP)\n__C.FAST_RCNN.ROI_BOX_HEAD = b''\n\n# Hidden layer dimension when using an MLP for the RoI box head\n__C.FAST_RCNN.MLP_HEAD_DIM = 1024\n\n# RoI transformation function (e.g., RoIPool or RoIAlign)\n# (RoIPoolF is the same as RoIPool; ignore the trailing 'F')\n__C.FAST_RCNN.ROI_XFORM_METHOD = b'RoIPoolF'\n\n# Number of grid sampling points in RoIAlign (usually use 2)\n# Only applies to RoIAlign\n__C.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO = 0\n\n# RoI transform output resolution\n# Note: some models may have constraints on what they can use, e.g. they use\n# pretrained FC layers like in VGG16, and will ignore this option\n__C.FAST_RCNN.ROI_XFORM_RESOLUTION = 14\n\n\n# ---------------------------------------------------------------------------- #\n# RPN options\n# ---------------------------------------------------------------------------- #\n__C.RPN = AttrDict()\n\n# [Infered value; do not set directly in a config]\n# Indicates that the model contains an RPN subnetwork\n__C.RPN.RPN_ON = False\n\n# RPN anchor sizes given in absolute pixels w.r.t. the scaled network input\n# Note: these options are *not* used by FPN RPN; see FPN.RPN* options\n__C.RPN.SIZES = (64, 128, 256, 512)\n\n# Stride of the feature map that RPN is attached\n__C.RPN.STRIDE = 16\n\n# RPN anchor aspect ratios\n__C.RPN.ASPECT_RATIOS = (0.5, 1, 2)\n\n\n# ---------------------------------------------------------------------------- #\n# FPN options\n# ---------------------------------------------------------------------------- #\n__C.FPN = AttrDict()\n\n# FPN is enabled if True\n__C.FPN.FPN_ON = False\n\n# Channel dimension of the FPN feature levels\n__C.FPN.DIM = 256\n\n# Initialize the lateral connections to output zero if True\n__C.FPN.ZERO_INIT_LATERAL = False\n\n# Stride of the coarsest FPN level\n# This is needed so the input can be padded properly\n__C.FPN.COARSEST_STRIDE = 32\n\n#\n# FPN may be used for just RPN, just object detection, or both\n#\n\n# Use FPN for RoI transform for object detection if True\n__C.FPN.MULTILEVEL_ROIS = False\n# Hyperparameters for the RoI-to-FPN level mapping heuristic\n__C.FPN.ROI_CANONICAL_SCALE = 224 # s0\n__C.FPN.ROI_CANONICAL_LEVEL = 4 # k0: where s0 maps to\n# Coarsest level of the FPN pyramid\n__C.FPN.ROI_MAX_LEVEL = 5\n# Finest level of the FPN pyramid\n__C.FPN.ROI_MIN_LEVEL = 2\n\n# Use FPN for RPN if True\n__C.FPN.MULTILEVEL_RPN = False\n# Coarsest level of the FPN pyramid\n__C.FPN.RPN_MAX_LEVEL = 6\n# Finest level of the FPN pyramid\n__C.FPN.RPN_MIN_LEVEL = 2\n# FPN RPN anchor aspect ratios\n__C.FPN.RPN_ASPECT_RATIOS = (0.5, 1, 2)\n# RPN anchors start at this size on RPN_MIN_LEVEL\n# The anchor size doubled each level after that\n# With a default of 32 and levels 2 to 6, we get anchor sizes of 32 to 512\n__C.FPN.RPN_ANCHOR_START_SIZE = 32\n# Use extra FPN levels, as done in the RetinaNet paper\n__C.FPN.EXTRA_CONV_LEVELS = False\n\n\n# ---------------------------------------------------------------------------- #\n# Mask R-CNN options (\"MRCNN\" means Mask R-CNN)\n# ---------------------------------------------------------------------------- #\n__C.MRCNN = AttrDict()\n\n# The type of RoI head to use for instance mask prediction\n# The 
string must match a function this is imported in modeling.model_builder\n# (e.g., 'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v1up4convs')\n__C.MRCNN.ROI_MASK_HEAD = b''\n\n# Resolution of mask predictions\n__C.MRCNN.RESOLUTION = 14\n\n# RoI transformation function and associated options\n__C.MRCNN.ROI_XFORM_METHOD = b'RoIAlign'\n\n# RoI transformation function (e.g., RoIPool or RoIAlign)\n__C.MRCNN.ROI_XFORM_RESOLUTION = 7\n\n# Number of grid sampling points in RoIAlign (usually use 2)\n# Only applies to RoIAlign\n__C.MRCNN.ROI_XFORM_SAMPLING_RATIO = 0\n\n# Number of channels in the mask head\n__C.MRCNN.DIM_REDUCED = 256\n\n# Use dilated convolution in the mask head\n__C.MRCNN.DILATION = 2\n\n# Upsample the predicted masks by this factor\n__C.MRCNN.UPSAMPLE_RATIO = 1\n\n# Use a fully-connected layer to predict the final masks instead of a conv layer\n__C.MRCNN.USE_FC_OUTPUT = False\n\n# Weight initialization method for the mask head and mask output layers\n__C.MRCNN.CONV_INIT = b'GaussianFill'\n\n# Use class specific mask predictions if True (otherwise use class agnostic mask\n# predictions)\n__C.MRCNN.CLS_SPECIFIC_MASK = True\n\n# Multi-task loss weight for masks\n__C.MRCNN.WEIGHT_LOSS_MASK = 1.0\n\n# Binarization threshold for converting soft masks to hard masks\n__C.MRCNN.THRESH_BINARIZE = 0.5\n\n\n# ---------------------------------------------------------------------------- #\n# Keyoint Mask R-CNN options (\"KRCNN\" = Mask R-CNN with Keypoint support)\n# ---------------------------------------------------------------------------- #\n__C.KRCNN = AttrDict()\n\n# The type of RoI head to use for instance keypoint prediction\n# The string must match a function this is imported in modeling.model_builder\n# (e.g., 'keypoint_rcnn_heads.add_roi_pose_head_v1convX')\n__C.KRCNN.ROI_KEYPOINTS_HEAD = b''\n\n# Output size (and size loss is computed on), e.g., 56x56\n__C.KRCNN.HEATMAP_SIZE = -1\n\n# Use bilinear interpolation to upsample the final heatmap by this factor\n__C.KRCNN.UP_SCALE = -1\n\n# Apply a ConvTranspose layer to the hidden representation computed by the\n# keypoint head prior to predicting the per-keypoint heatmaps\n__C.KRCNN.USE_DECONV = False\n# Channel dimension of the hidden representation produced by the ConvTranspose\n__C.KRCNN.DECONV_DIM = 256\n\n# Use a ConvTranspose layer to predict the per-keypoint heatmaps\n__C.KRCNN.USE_DECONV_OUTPUT = False\n\n# Use dilation in the keypoint head\n__C.KRCNN.DILATION = 1\n\n# Size of the kernels to use in all ConvTranspose operations\n__C.KRCNN.DECONV_KERNEL = 4\n\n# Number of keypoints in the dataset (e.g., 17 for COCO)\n__C.KRCNN.NUM_KEYPOINTS = -1\n\n# Number of stacked Conv layers in keypoint head\n__C.KRCNN.NUM_STACKED_CONVS = 8\n\n# Dimension of the hidden representation output by the keypoint head\n__C.KRCNN.CONV_HEAD_DIM = 256\n\n# Conv kernel size used in the keypoint head\n__C.KRCNN.CONV_HEAD_KERNEL = 3\n# Conv kernel weight filling function\n__C.KRCNN.CONV_INIT = b'GaussianFill'\n\n# Use NMS based on OKS if True\n__C.KRCNN.NMS_OKS = False\n\n# Source of keypoint confidence\n# Valid options: ('bbox', 'logit', 'prob')\n__C.KRCNN.KEYPOINT_CONFIDENCE = b'bbox'\n\n# Standard ROI XFORM options (see FAST_RCNN or MRCNN options)\n__C.KRCNN.ROI_XFORM_METHOD = b'RoIAlign'\n__C.KRCNN.ROI_XFORM_RESOLUTION = 7\n__C.KRCNN.ROI_XFORM_SAMPLING_RATIO = 0\n\n# Minimum number of labeled keypoints that must exist in a minibatch (otherwise\n# the minibatch is discarded)\n__C.KRCNN.MIN_KEYPOINT_COUNT_FOR_VALID_MINIBATCH = 20\n\n# When infering the 
keypoint locations from the heatmap, don't scale the heatmap\n# below this minimum size\n__C.KRCNN.INFERENCE_MIN_SIZE = 0\n\n# Multi-task loss weight to use for keypoints\n# Recommended values:\n# - use 1.0 if KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is True\n# - use 4.0 if KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False\n__C.KRCNN.LOSS_WEIGHT = 1.0\n\n# Normalize by the total number of visible keypoints in the minibatch if True.\n# Otherwise, normalize by the total number of keypoints that could ever exist\n# in the minibatch. See comments in modeling.model_builder.add_keypoint_losses\n# for detailed discussion.\n__C.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS = True\n\n\n# ---------------------------------------------------------------------------- #\n# R-FCN options\n# ---------------------------------------------------------------------------- #\n__C.RFCN = AttrDict()\n\n# Position-sensitive RoI pooling output grid size (height and width)\n__C.RFCN.PS_GRID_SIZE = 3\n\n\n# ---------------------------------------------------------------------------- #\n# ResNets options (\"ResNets\" = ResNet and ResNeXt)\n# ---------------------------------------------------------------------------- #\n__C.RESNETS = AttrDict()\n\n# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt\n__C.RESNETS.NUM_GROUPS = 1\n\n# Baseline width of each group\n__C.RESNETS.WIDTH_PER_GROUP = 64\n\n# Place the stride 2 conv on the 1x1 filter\n# Use True only for the original MSRA ResNet; use False for C2 and Torch models\n__C.RESNETS.STRIDE_1X1 = True\n\n# Residual transformation function\n__C.RESNETS.TRANS_FUNC = b'bottleneck_transformation'\n\n# Apply dilation in stage \"res5\"\n__C.RESNETS.RES5_DILATION = 1\n\n\n# ---------------------------------------------------------------------------- #\n# Misc options\n# ---------------------------------------------------------------------------- #\n\n# Number of GPUs to use (applies to both training and testing)\n__C.NUM_GPUS = 1\n\n# Use NCCL for all reduce, otherwise use muji\n# Warning: if set to True, you may experience deadlocks\n__C.USE_NCCL = False\n\n# The mapping from image coordinates to feature map coordinates might cause\n# some boxes that are distinct in image space to become identical in feature\n# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor\n# for identifying duplicate boxes.\n# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16\n__C.DEDUP_BOXES = 1 / 16.\n\n# Clip bounding box transformation predictions to prevent np.exp from\n# overflowing\n# Heuristic choice based on that would scale a 16 pixel anchor up to 1000 pixels\n__C.BBOX_XFORM_CLIP = np.log(1000. / 16.)\n\n# Pixel mean values (BGR order) as a (1, 1, 3) array\n# We use the same pixel mean for all networks even though it's not exactly what\n# they were trained with\n# \"Fun\" fact: the history of where these values comes from is lost\n__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])\n\n# For reproducibility...but not really because modern fast GPU libraries use\n# non-deterministic op implementations\n__C.RNG_SEED = 3\n\n# A small number that's used many times\n__C.EPS = 1e-14\n\n# Root directory of project\n__C.ROOT_DIR = os.getcwd()\n\n# Output basedir\n__C.OUTPUT_DIR = b'/tmp'\n\n# Name (or path to) the matlab executable\n__C.MATLAB = b'matlab'\n\n# Reduce memory usage with memonger gradient blob sharing\n__C.MEMONGER = True\n\n# Futher reduce memory by allowing forward pass activations to be shared when\n# possible. 
Note that this will cause activation blob inspection (values,\n# shapes, etc.) to be meaningless when activation blobs are reused.\n__C.MEMONGER_SHARE_ACTIVATIONS = False\n\n# Dump detection visualizations\n__C.VIS = False\n\n# Score threshold for visualization\n__C.VIS_TH = 0.9\n\n# Expected results should take the form of a list of expectations, each\n# specified by four elements (dataset, task, metric, expected value). For\n# example: [['coco_2014_minival', 'box_proposal', 'AR@1000', 0.387]]\n__C.EXPECTED_RESULTS = []\n# Absolute and relative tolerance to use when comparing to EXPECTED_RESULTS\n__C.EXPECTED_RESULTS_RTOL = 0.1\n__C.EXPECTED_RESULTS_ATOL = 0.005\n# Set to send email in case of an EXPECTED_RESULTS failure\n__C.EXPECTED_RESULTS_EMAIL = b''\n\n# Models and proposals referred to by URL are downloaded to a local cache\n# specified by DOWNLOAD_CACHE\n__C.DOWNLOAD_CACHE = b'/tmp/detectron-download-cache'\n\n\n# ---------------------------------------------------------------------------- #\n# Cluster options\n# ---------------------------------------------------------------------------- #\n__C.CLUSTER = AttrDict()\n\n# Flag to indicate if the code is running in a cluster environment\n__C.CLUSTER.ON_CLUSTER = False\n\n\n# ---------------------------------------------------------------------------- #\n# Deprecated options\n# If an option is removed from the code and you don't want to break existing\n# yaml configs, you can add the full config key as a string to the set below.\n# ---------------------------------------------------------------------------- #\n_DEPCRECATED_KEYS = set(\n (\n 'FINAL_MSG',\n 'MODEL.DILATION',\n 'ROOT_GPU_ID',\n 'RPN.ON',\n 'TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED',\n 'TRAIN.DROPOUT',\n 'USE_GPU_NMS',\n 'TEST.NUM_TEST_IMAGES',\n )\n)\n\n# ---------------------------------------------------------------------------- #\n# Renamed options\n# If you rename a config option, record the mapping from the old name to the new\n# name in the dictionary below. 
Optionally, if the type also changed, you can\n# make the value a tuple that specifies first the renamed key and then\n# instructions for how to edit the config file.\n# ---------------------------------------------------------------------------- #\n_RENAMED_KEYS = {\n 'EXAMPLE.RENAMED.KEY': 'EXAMPLE.KEY', # Dummy example to follow\n 'MODEL.PS_GRID_SIZE': 'RFCN.PS_GRID_SIZE',\n 'MODEL.ROI_HEAD': 'FAST_RCNN.ROI_BOX_HEAD',\n 'MRCNN.MASK_HEAD_NAME': 'MRCNN.ROI_MASK_HEAD',\n 'TRAIN.DATASET': (\n 'TRAIN.DATASETS',\n \"Also convert to a tuple, e.g., \" +\n \"'coco_2014_train' -> ('coco_2014_train',) or \" +\n \"'coco_2014_train:coco_2014_valminusminival' -> \" +\n \"('coco_2014_train', 'coco_2014_valminusminival')\"\n ),\n 'TRAIN.PROPOSAL_FILE': (\n 'TRAIN.PROPOSAL_FILES',\n \"Also convert to a tuple, e.g., \" +\n \"'path/to/file' -> ('path/to/file',) or \" +\n \"'path/to/file1:path/to/file2' -> \" +\n \"('path/to/file1', 'path/to/file2')\"\n ),\n}\n\n\ndef assert_and_infer_cfg(cache_urls=True):\n if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:\n __C.RPN.RPN_ON = True\n if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:\n __C.TEST.PRECOMPUTED_PROPOSALS = False\n if cache_urls:\n cache_cfg_urls()\n\n\ndef cache_cfg_urls():\n \"\"\"Download URLs in the config, cache them locally, and rewrite cfg to make\n use of the locally cached file.\n \"\"\"\n __C.TRAIN.WEIGHTS = cache_url(__C.TRAIN.WEIGHTS, __C.DOWNLOAD_CACHE)\n __C.TEST.WEIGHTS = cache_url(__C.TEST.WEIGHTS, __C.DOWNLOAD_CACHE)\n __C.TRAIN.PROPOSAL_FILES = tuple(\n [cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TRAIN.PROPOSAL_FILES]\n )\n __C.TEST.PROPOSAL_FILES = tuple(\n [cache_url(f, __C.DOWNLOAD_CACHE) for f in __C.TEST.PROPOSAL_FILES]\n )\n\n\ndef get_output_dir(training=True):\n \"\"\"Get the output directory determined by the current global config.\"\"\"\n dataset = __C.TRAIN.DATASETS if training else __C.TEST.DATASETS\n dataset = ':'.join(dataset)\n tag = 'train' if training else 'test'\n # <output-dir>/<train|test>/<dataset>/<model-type>/\n outdir = osp.join(__C.OUTPUT_DIR, tag, dataset, __C.MODEL.TYPE)\n if not osp.exists(outdir):\n os.makedirs(outdir)\n return outdir\n\n\ndef merge_cfg_from_file(cfg_filename):\n \"\"\"Load a yaml config file and merge it into the global config.\"\"\"\n with open(cfg_filename, 'r') as f:\n yaml_cfg = AttrDict(yaml.load(f))\n _merge_a_into_b(yaml_cfg, __C)\n\n\ndef merge_cfg_from_cfg(cfg_other):\n \"\"\"Merge `cfg_other` into the global config.\"\"\"\n _merge_a_into_b(cfg_other, __C)\n\n\ndef merge_cfg_from_list(cfg_list):\n \"\"\"Merge config keys, values in a list (e.g., from command line) into the\n global config. 
For example, `cfg_list = ['TEST.NMS', 0.5]`.\n \"\"\"\n assert len(cfg_list) % 2 == 0\n for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):\n if _key_is_deprecated(full_key):\n continue\n if _key_is_renamed(full_key):\n _raise_key_rename_error(full_key)\n key_list = full_key.split('.')\n d = __C\n for subkey in key_list[:-1]:\n assert subkey in d, 'Non-existent key: {}'.format(full_key)\n d = d[subkey]\n subkey = key_list[-1]\n assert subkey in d, 'Non-existent key: {}'.format(full_key)\n value = _decode_cfg_value(v)\n value = _check_and_coerce_cfg_value_type(\n value, d[subkey], subkey, full_key\n )\n d[subkey] = value\n\n\ndef _merge_a_into_b(a, b, stack=None):\n \"\"\"Merge config dictionary a into config dictionary b, clobbering the\n options in b whenever they are also specified in a.\n \"\"\"\n assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'\n assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'\n\n for k, v_ in a.items():\n full_key = '.'.join(stack) + '.' + k if stack is not None else k\n # a must specify keys that are in b\n if k not in b:\n if _key_is_deprecated(full_key):\n continue\n elif _key_is_renamed(full_key):\n _raise_key_rename_error(full_key)\n else:\n raise KeyError('Non-existent config key: {}'.format(full_key))\n\n v = copy.deepcopy(v_)\n v = _decode_cfg_value(v)\n v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)\n\n # Recursively merge dicts\n if isinstance(v, AttrDict):\n try:\n stack_push = [k] if stack is None else stack + [k]\n _merge_a_into_b(v, b[k], stack=stack_push)\n except BaseException:\n raise\n else:\n b[k] = v\n\n\ndef _key_is_deprecated(full_key):\n if full_key in _DEPCRECATED_KEYS:\n logger.warn(\n 'Deprecated config key (ignoring): {}'.format(full_key)\n )\n return True\n return False\n\n\ndef _key_is_renamed(full_key):\n return full_key in _RENAMED_KEYS\n\n\ndef _raise_key_rename_error(full_key):\n new_key = _RENAMED_KEYS[full_key]\n if isinstance(new_key, tuple):\n msg = ' Note: ' + new_key[1]\n new_key = new_key[0]\n else:\n msg = ''\n raise KeyError(\n 'Key {} was renamed to {}; please update your config.{}'.\n format(full_key, new_key, msg)\n )\n\n\ndef _decode_cfg_value(v):\n \"\"\"Decodes a raw config value (e.g., from a yaml config files or command\n line argument) into a Python object.\n \"\"\"\n # Configs parsed from raw yaml will contain dictionary keys that need to be\n # converted to AttrDict objects\n if isinstance(v, dict):\n return AttrDict(v)\n # All remaining processing is only applied to strings\n if not isinstance(v, basestring):\n return v\n # Try to interpret `v` as a:\n # string, number, tuple, list, dict, boolean, or None\n try:\n v = literal_eval(v)\n # The following two excepts allow v to pass through when it represents a\n # string.\n #\n # Longer explanation:\n # The type of v is always a string (before calling literal_eval), but\n # sometimes it *represents* a string and other times a data structure, like\n # a list. In the case that v represents a string, what we got back from the\n # yaml parser is 'foo' *without quotes* (so, not '\"foo\"'). literal_eval is\n # ok with '\"foo\"', but will raise a ValueError if given 'foo'. In other\n # cases, like paths (v = 'foo/bar' and not v = '\"foo/bar\"'), literal_eval\n # will raise a SyntaxError.\n except ValueError:\n pass\n except SyntaxError:\n pass\n return v\n\n\ndef _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):\n \"\"\"Checks that `value_a`, which is intended to replace `value_b` is of the\n right type. 
The type is correct if it matches exactly or is one of a few\n cases in which the type can be easily coerced.\n \"\"\"\n # The types must match (with some exceptions)\n type_b = type(value_b)\n type_a = type(value_a)\n if type_a is type_b:\n return value_a\n\n # Exceptions: numpy arrays, strings, tuple<->list\n if isinstance(value_b, np.ndarray):\n value_a = np.array(value_a, dtype=value_b.dtype)\n elif isinstance(value_b, basestring):\n value_a = str(value_a)\n elif isinstance(value_a, tuple) and isinstance(value_b, list):\n value_a = list(value_a)\n elif isinstance(value_a, list) and isinstance(value_b, tuple):\n value_a = tuple(value_a)\n else:\n raise ValueError(\n 'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '\n 'key: {}'.format(type_b, type_a, value_b, value_a, full_key)\n )\n return value_a\n"
] |
[
[
"numpy.log",
"numpy.array"
]
] |
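Much of the override machinery at the end of the config entry rests on ast.literal_eval: values arriving as strings from yaml or the command line are decoded into numbers, tuples, or booleans, and fall back to the raw string when evaluation fails. A small stand-alone illustration of that decode behaviour (the helper name and sample values here are illustrative, not Detectron code):

from ast import literal_eval

def decode_cfg_value(v):
    # Mirrors the fallback logic of the entry's _decode_cfg_value for strings.
    if not isinstance(v, str):
        return v
    try:
        return literal_eval(v)   # '0.5' -> 0.5, '(600,)' -> (600,), 'True' -> True
    except (ValueError, SyntaxError):
        return v                 # bare strings like 'steps_with_decay' pass through

for raw in ['0.5', '(600,)', 'True', 'steps_with_decay', '/tmp/detectron']:
    print(repr(raw), '->', repr(decode_cfg_value(raw)))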
Neuralji/Technical-Analysis-Indicators
|
[
"0aede1d4cd0fc1768496b07f73aec5a929315b3d"
] |
[
"Exp-Moving-Average.py"
] |
[
"import numpy as np\ndef ExpMA(data,period):\n weights=np.exp(np.linspace(-1.,0.,period))\n weights/=weights.sum()\n a=np.convolve(data,weights)[:len(values)]\n a[:period]=a[period]\n return a\n \n"
] |
[
[
"numpy.convolve",
"numpy.linspace"
]
] |
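The ExpMA entry above computes an exponentially weighted moving average by convolving the series with normalised np.exp(np.linspace(-1., 0., period)) weights and padding the ramp-up region. A short usage sketch on invented prices (function name, data, and period are illustrative):

import numpy as np

def exp_ma(data, period):
    # Exponentially spaced weights, normalised to sum to 1, as in the entry.
    weights = np.exp(np.linspace(-1., 0., period))
    weights /= weights.sum()
    ema = np.convolve(data, weights)[:len(data)]
    ema[:period] = ema[period]  # overwrite the ramp-up region that lacks a full window
    return ema

prices = np.array([10., 11., 12., 11., 13., 14., 13., 15., 16., 15.])
print(exp_ma(prices, period=3))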
protagohhz/hivemind
|
[
"487fb68feea4d27ede0afdef211f6edc889b1a9e"
] |
[
"hivemind/moe/server/__init__.py"
] |
[
"from __future__ import annotations\n\nimport multiprocessing as mp\nimport multiprocessing.synchronize\nimport threading\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nfrom multiaddr import Multiaddr\n\nimport hivemind\nfrom hivemind.dht import DHT\nfrom hivemind.moe.server.checkpoints import CheckpointSaver, is_directory, load_experts\nfrom hivemind.moe.server.connection_handler import ConnectionHandler\nfrom hivemind.moe.server.dht_handler import DHTHandlerThread, declare_experts, get_experts\nfrom hivemind.moe.server.expert_backend import ExpertBackend\nfrom hivemind.moe.server.expert_uid import UID_DELIMITER, generate_uids_from_pattern\nfrom hivemind.moe.server.layers import (\n add_custom_models_from_file,\n name_to_block,\n name_to_input,\n register_expert_class,\n schedule_name_to_scheduler,\n)\nfrom hivemind.moe.server.runtime import Runtime\nfrom hivemind.proto.runtime_pb2 import CompressionType\nfrom hivemind.utils import BatchTensorDescriptor, Endpoint, get_free_port, get_logger, get_port, replace_port\n\nlogger = get_logger(__name__)\n\n\nclass Server(threading.Thread):\n \"\"\"\n Server allows you to host \"experts\" - pytorch sub-networks used by Decentralized Mixture of Experts.\n After creation, a server should be started: see Server.run or Server.run_in_background.\n\n A working server does 3 things:\n - processes incoming forward/backward requests via Runtime (created by the server)\n - publishes updates to expert status every :update_period: seconds\n - follows orders from HivemindController - if it exists\n\n :type dht: DHT or None. Server with dht=None will NOT be visible from DHT,\n but it will still support accessing experts directly with RemoteExpert(uid=UID, endpoint=\"IPADDR:PORT\").\n :param expert_backends: dict{expert uid (str) : ExpertBackend} for all expert hosted by this server.\n :param listen_on: server's dht address that determines how it can be accessed. Address and (optional) port\n :param num_connection_handlers: maximum number of simultaneous requests. Please note that the default value of 1\n if too small for normal functioning, we recommend 4 handlers per expert backend.\n :param update_period: how often will server attempt to publish its state (i.e. 
experts) to the DHT;\n if dht is None, this parameter is ignored.\n :param start: if True, the server will immediately start as a background thread and returns control after server\n is ready (see .ready below)\n \"\"\"\n\n def __init__(\n self,\n dht: Optional[DHT],\n expert_backends: Dict[str, ExpertBackend],\n listen_on: Endpoint = \"0.0.0.0:*\",\n num_connection_handlers: int = 1,\n update_period: int = 30,\n start=False,\n checkpoint_dir=None,\n **kwargs,\n ):\n super().__init__()\n self.dht, self.experts, self.update_period = dht, expert_backends, update_period\n if get_port(listen_on) is None:\n listen_on = replace_port(listen_on, new_port=get_free_port())\n self.listen_on, self.port = listen_on, get_port(listen_on)\n\n self.conn_handlers = [ConnectionHandler(listen_on, self.experts) for _ in range(num_connection_handlers)]\n if checkpoint_dir is not None:\n self.checkpoint_saver = CheckpointSaver(expert_backends, checkpoint_dir, update_period)\n else:\n self.checkpoint_saver = None\n self.runtime = Runtime(self.experts, **kwargs)\n\n if self.dht and self.experts:\n self.dht_handler_thread = DHTHandlerThread(\n experts=self.experts,\n dht=self.dht,\n endpoint=self.listen_on,\n update_period=self.update_period,\n daemon=True,\n )\n\n if start:\n self.run_in_background(await_ready=True)\n\n @classmethod\n def create(\n cls,\n listen_on=\"0.0.0.0:*\",\n num_experts: int = None,\n expert_uids: str = None,\n expert_pattern: str = None,\n expert_cls=\"ffn\",\n hidden_dim=1024,\n optim_cls=torch.optim.Adam,\n scheduler: str = \"none\",\n num_warmup_steps=None,\n num_total_steps=None,\n clip_grad_norm=None,\n num_handlers=None,\n min_batch_size=1,\n max_batch_size=4096,\n device=None,\n no_dht=False,\n initial_peers=(),\n checkpoint_dir: Optional[Path] = None,\n compression=CompressionType.NONE,\n stats_report_interval: Optional[int] = None,\n custom_module_path=None,\n *,\n start: bool,\n ) -> Server:\n \"\"\"\n Instantiate a server with several identical experts. See argparse comments below for details\n :param listen_on: network interface with address and (optional) port, e.g. \"127.0.0.1:1337\" or \"[::]:80\"\n :param num_experts: run this many identical experts\n :param expert_pattern: a string pattern or a list of expert uids, example: myprefix.[0:32].[0:256]\\\n means \"sample random experts between myprefix.0.0 and myprefix.255.255;\n :param expert_uids: spawn experts with these exact uids, overrides num_experts and expert_pattern\n :param expert_cls: expert type from hivemind.moe.server.layers, e.g. 
'ffn' or 'transformer';\n :param hidden_dim: main dimension for expert_cls\n :param num_handlers: server will use this many parallel processes to handle incoming requests\n :param min_batch_size: total num examples in the same batch will be greater than this value\n :param max_batch_size: total num examples in the same batch will not exceed this value\n :param device: all experts will use this device in torch notation; default: cuda if available else cpu\n\n :param optim_cls: uses this optimizer to train all experts\n :param scheduler: if not `none`, the name of the expert LR scheduler\n :param num_warmup_steps: the number of warmup steps for LR schedule\n :param num_total_steps: the total number of steps for LR schedule\n :param clip_grad_norm: maximum gradient norm used for clipping\n\n :param no_dht: if specified, the server will not be attached to a dht\n :param initial_peers: multiaddrs of one or more active DHT peers (if you want to join an existing DHT)\n\n :param checkpoint_dir: directory to save and load expert checkpoints\n\n :param compression: if specified, use this compression to pack all inputs, outputs and gradients by all experts\n hosted on this server. For a more fine-grained compression, start server in python and specify compression\n for each BatchTensorProto in ExpertBackend for the respective experts.\n\n :param start: if True, starts server right away and returns when server is ready for requests\n :param stats_report_interval: interval between two reports of batch processing performance statistics\n \"\"\"\n if custom_module_path is not None:\n add_custom_models_from_file(custom_module_path)\n assert expert_cls in name_to_block\n\n if no_dht:\n dht = None\n else:\n dht = hivemind.DHT(initial_peers=initial_peers, start=True)\n visible_maddrs_str = [str(a) for a in dht.get_visible_maddrs()]\n logger.info(f\"Running DHT node on {visible_maddrs_str}, initial peers = {initial_peers}\")\n\n assert (expert_pattern is None and num_experts is None and expert_uids is not None) or (\n num_experts is not None and expert_uids is None\n ), \"Please provide either expert_uids *or* num_experts (possibly with expert_pattern), but not both\"\n\n if expert_uids is None:\n if checkpoint_dir is not None:\n assert is_directory(checkpoint_dir)\n expert_uids = [\n child.name for child in checkpoint_dir.iterdir() if (child / \"checkpoint_last.pt\").exists()\n ]\n total_experts_in_checkpoint = len(expert_uids)\n logger.info(f\"Located {total_experts_in_checkpoint} checkpoints for experts {expert_uids}\")\n\n if total_experts_in_checkpoint > num_experts:\n raise ValueError(\n f\"Found {total_experts_in_checkpoint} checkpoints, but num_experts is set to {num_experts}, \"\n f\"which is smaller. 
Either increase num_experts or remove unneeded checkpoints.\"\n )\n else:\n expert_uids = []\n\n uids_to_generate = num_experts - len(expert_uids)\n if uids_to_generate > 0:\n logger.info(f\"Generating {uids_to_generate} expert uids from pattern {expert_pattern}\")\n expert_uids.extend(generate_uids_from_pattern(uids_to_generate, expert_pattern, dht))\n\n num_experts = len(expert_uids)\n num_handlers = num_handlers if num_handlers is not None else num_experts * 8\n optim_cls = optim_cls if optim_cls is not None else partial(torch.optim.SGD, lr=0.0)\n device = device or (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n sample_input = name_to_input[expert_cls](3, hidden_dim)\n if isinstance(sample_input, tuple):\n args_schema = tuple(BatchTensorDescriptor.from_tensor(arg, compression) for arg in sample_input)\n else:\n args_schema = (BatchTensorDescriptor.from_tensor(sample_input, compression),)\n\n scheduler = schedule_name_to_scheduler[scheduler]\n\n # initialize experts\n experts = {}\n for expert_uid in expert_uids:\n expert = name_to_block[expert_cls](hidden_dim)\n experts[expert_uid] = hivemind.ExpertBackend(\n name=expert_uid,\n expert=expert,\n args_schema=args_schema,\n optimizer=optim_cls(expert.parameters()),\n scheduler=scheduler,\n num_warmup_steps=num_warmup_steps,\n num_total_steps=num_total_steps,\n clip_grad_norm=clip_grad_norm,\n min_batch_size=min_batch_size,\n max_batch_size=max_batch_size,\n )\n\n if checkpoint_dir is not None:\n load_experts(experts, checkpoint_dir)\n\n return cls(\n dht,\n experts,\n listen_on=listen_on,\n num_connection_handlers=num_handlers,\n device=device,\n checkpoint_dir=checkpoint_dir,\n stats_report_interval=stats_report_interval,\n start=start,\n )\n\n def run(self):\n \"\"\"\n Starts Server in the current thread. Initializes dht if necessary, starts connection handlers,\n runs Runtime (self.runtime) to process incoming requests.\n \"\"\"\n logger.info(f\"Server started at {self.listen_on}\")\n logger.info(f\"Got {len(self.experts)} experts:\")\n for expert_name, backend in self.experts.items():\n num_parameters = sum(p.numel() for p in backend.expert.parameters() if p.requires_grad)\n logger.info(f\"{expert_name}: {backend.expert.__class__.__name__}, {num_parameters} parameters\")\n\n if self.dht:\n if not self.dht.is_alive():\n self.dht.run_in_background(await_ready=True)\n\n if self.experts:\n self.dht_handler_thread.start()\n if self.checkpoint_saver is not None:\n self.checkpoint_saver.start()\n\n for process in self.conn_handlers:\n if not process.is_alive():\n process.start()\n process.ready.wait()\n\n try:\n self.runtime.run()\n finally:\n self.shutdown()\n\n def run_in_background(self, await_ready=True, timeout=None):\n \"\"\"\n Starts Server in a background thread. 
if await_ready, this method will wait until background server\n is ready to process incoming requests or for :timeout: seconds max.\n \"\"\"\n self.start()\n if await_ready and not self.ready.wait(timeout=timeout):\n raise TimeoutError(\"Server didn't notify .ready in {timeout} seconds\")\n\n @property\n def ready(self) -> mp.synchronize.Event:\n \"\"\"\n An event (multiprocessing.Event) that is set when the server is ready to process requests.\n\n Example\n =======\n >>> server.start()\n >>> server.ready.wait(timeout=10)\n >>> print(\"Server ready\" if server.ready.is_set() else \"Server didn't start in 10 seconds\")\n \"\"\"\n return self.runtime.ready # mp.Event that is true if self is ready to process batches\n\n def shutdown(self):\n \"\"\"\n Gracefully terminate the server, process-safe.\n Please note that terminating server otherwise (e.g. by killing processes) may result in zombie processes.\n If you did already cause a zombie outbreak, your only option is to kill them with -9 (SIGKILL).\n \"\"\"\n self.ready.clear()\n\n for process in self.conn_handlers:\n process.terminate()\n process.join()\n logger.debug(\"Connection handlers terminated\")\n\n if self.dht and self.experts:\n self.dht_handler_thread.stop.set()\n self.dht_handler_thread.join()\n\n if self.checkpoint_saver is not None:\n self.checkpoint_saver.stop.set()\n self.checkpoint_saver.join()\n\n if self.dht is not None:\n self.dht.shutdown()\n self.dht.join()\n\n logger.debug(f\"Shutting down runtime\")\n\n self.runtime.shutdown()\n logger.info(\"Server shutdown succesfully\")\n\n\n@contextmanager\ndef background_server(*args, shutdown_timeout=5, **kwargs) -> Tuple[hivemind.Endpoint, List[Multiaddr]]:\n \"\"\"A context manager that creates server in a background thread, awaits .ready on entry and shutdowns on exit\"\"\"\n pipe, runners_pipe = mp.Pipe(duplex=True)\n runner = mp.Process(target=_server_runner, args=(runners_pipe, *args), kwargs=kwargs)\n try:\n runner.start()\n # once the server is ready, runner will send us\n # either (False, exception) or (True, (server.listen_on, dht_maddrs))\n start_ok, data = pipe.recv()\n if start_ok:\n yield data\n pipe.send(\"SHUTDOWN\") # on exit from context, send shutdown signal\n else:\n raise RuntimeError(f\"Server failed to start: {data}\")\n finally:\n runner.join(timeout=shutdown_timeout)\n if runner.is_alive():\n logger.info(\"Server failed to shutdown gracefully, terminating it the hard way...\")\n runner.kill()\n logger.info(\"Server terminated.\")\n\n\ndef _server_runner(pipe, *args, **kwargs):\n try:\n server = Server.create(*args, start=True, **kwargs)\n except Exception as e:\n logger.exception(f\"Encountered an exception when starting a server: {e}\")\n pipe.send((False, f\"{type(e).__name__} {e}\"))\n return\n\n try:\n dht_maddrs = server.dht.get_visible_maddrs() if server.dht is not None else None\n pipe.send((True, (server.listen_on, dht_maddrs)))\n pipe.recv() # wait for shutdown signal\n\n finally:\n logger.info(\"Shutting down server...\")\n server.shutdown()\n server.join()\n logger.info(\"Server shut down.\")\n"
] |
[
[
"torch.cuda.is_available"
]
] |
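The docstrings above describe starting a `Server` that hosts several identical experts; the sketch below is one hypothetical way to drive it through the `background_server` context manager defined in the same module. The expert pattern, dimensions, and handler count are illustrative values, not taken from the repository's examples.

```python
from hivemind.moe.server import background_server

# Spin up two local "ffn" experts in a background process; the context manager
# yields (listen_on, dht_maddrs) once the server is ready and shuts it down on
# exit. no_dht=True keeps the example fully local (dht_maddrs is then None).
with background_server(
    num_experts=2,
    expert_pattern="expert.[0:16]",   # uid pattern, following the docstring's format
    expert_cls="ffn",
    hidden_dim=64,
    num_handlers=4,
    no_dht=True,
) as (listen_on, dht_maddrs):
    print(f"Serving experts on {listen_on}; DHT maddrs: {dht_maddrs}")
```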
H-B-P/Durkon-Tobit
|
[
"722b72b3a1f00634a4b024bd6f4da9a70c1501e3"
] |
[
"actual_modelling.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport math\nimport copy\nimport time\n\nimport util\nimport apply_model\nimport calculus\n\ndef produce_cont_relevances(inputDf, model, col):\n reles=np.zeros((len(model[\"conts\"][col]),len(inputDf)))\n \n reles[0][(inputDf[col]<=model[\"conts\"][col][0][0])] = 1 #d(featpred)/d(pt)\n for i in range(len(model[\"conts\"][col])-1):\n x = inputDf[col]\n x1 = model[\"conts\"][col][i][0]\n x2 = model[\"conts\"][col][i+1][0]\n subset = (x>=x1) & (x<=x2)\n #print(reles[subset][:,1])\n reles[i][subset] = (x2 - x[subset])/(x2 - x1) #d(featpred)/d(pt)\n reles[i+1][subset] = (x[subset] - x1)/(x2 - x1) #d(featpred)/d(pt)\n reles[-1][(inputDf[col]>=model[\"conts\"][col][-1][0])] = 1 #d(featpred)/d(pt)\n \n return np.transpose(reles) #roundabout way of doing this but the rest of the function doesn't flow as naturally if x and y don't switch places\n\ndef produce_cont_relevances_dict(inputDf, model):\n opDict = {}\n \n for col in model[\"conts\"]:\n opDict[col]=produce_cont_relevances(inputDf, model, col)\n \n return opDict\n\ndef produce_cat_relevances(inputDf, model, col):\n reles=np.zeros((len(model[\"cats\"][col][\"uniques\"])+1,len(inputDf)))\n \n skeys = apply_model.get_sorted_keys(model, col)\n for i in range(len(skeys)):\n reles[i][inputDf[col].isin([skeys[i]])] = 1 #d(featpred)/d(pt)\n reles[-1][~inputDf[col].isin(skeys)] = 1 #d(featpred)/d(pt)\n \n return np.transpose(reles) #roundabout way of doing this but the rest of the function doesn't flow as naturally if x and y don't switch places\n\ndef produce_cat_relevances_dict(inputDf, model):\n opDict = {}\n \n for col in model[\"cats\"]:\n opDict[col]=produce_cat_relevances(inputDf, model, col)\n \n return opDict\n\ndef sum_and_listify_matrix(a):\n return np.array(sum(a)).tolist()\n\ndef produce_total_relevances_dict(contReleDict, catReleDict):\n op = {\"conts\":{},\"cats\":{}}\n for col in contReleDict:\n print(sum(contReleDict[col]))\n op[\"conts\"][col] = sum_and_listify_matrix(contReleDict[col])\n for col in catReleDict:\n op[\"cats\"][col] = sum_and_listify_matrix(catReleDict[col])\n print(op)\n return op\n\ndef produce_wReleDict(releDict, w):\n wReleDict = {}\n for col in releDict:\n wReleDict[col]=w*releDict[col]\n return wReleDict\n\ndef train_model(inputDf, inputDfC, target, nrounds, lrsU, lrsP, startingModelsU, startingModelsP, weights=None, weightsC=None, gradU=calculus.gnormal_u_diff, gradP=calculus.gnormal_p_diff, gradUC=calculus.u_diff_censored, gradPC=calculus.p_diff_censored):\n \n modelsU = copy.deepcopy(startingModelsU)\n modelsP = copy.deepcopy(startingModelsP)\n \n if weights==None:\n weights = np.ones(len(inputDf))\n w = np.array(np.transpose(np.matrix(weights)))\n sw = sum(weights)\n \n if weightsC==None:\n weightsC = np.ones(len(inputDfC))\n wC = np.array(np.transpose(np.matrix(weightsC)))\n swC = sum(weightsC)\n \n contReleDictListU=[]\n catReleDictListU=[]\n totReleDictListU=[]\n \n contWReleDictListU=[]\n catWReleDictListU=[]\n totWReleDictListU=[]\n \n contReleDictListP=[]\n catReleDictListP=[]\n totReleDictListP=[]\n \n contWReleDictListP=[]\n catWReleDictListP=[]\n totWReleDictListP=[]\n \n contReleDictListUC=[]\n catReleDictListUC=[]\n totReleDictListUC=[]\n \n contWReleDictListUC=[]\n catWReleDictListUC=[]\n totWReleDictListUC=[]\n \n contReleDictListPC=[]\n catReleDictListPC=[]\n totReleDictListPC=[]\n \n contWReleDictListPC=[]\n catWReleDictListPC=[]\n totWReleDictListPC=[]\n \n print(\"initial relevances setup\")\n \n for model in modelsU:\n cord = 
produce_cont_relevances_dict(inputDf,model)\n card = produce_cat_relevances_dict(inputDf,model)\n \n contReleDictListU.append(cord)\n catReleDictListU.append(card)\n totReleDictListU.append(produce_total_relevances_dict(cord, card))\n \n cowrd = produce_wReleDict(cord, w)\n cawrd = produce_wReleDict(card, w)\n \n contWReleDictListU.append(cowrd)\n catWReleDictListU.append(cawrd)\n totWReleDictListU.append(produce_total_relevances_dict(cowrd, cawrd))\n \n for model in modelsP:\n cord = produce_cont_relevances_dict(inputDf,model)\n card = produce_cat_relevances_dict(inputDf,model)\n \n contReleDictListP.append(cord)\n catReleDictListP.append(card)\n totReleDictListP.append(produce_total_relevances_dict(cord, card))\n \n cowrd = produce_wReleDict(cord, w)\n cawrd = produce_wReleDict(card, w)\n \n contWReleDictListP.append(cowrd)\n catWReleDictListP.append(cawrd)\n totWReleDictListP.append(produce_total_relevances_dict(cowrd, cawrd))\n \n for model in modelsU:\n cord = produce_cont_relevances_dict(inputDfC,model)\n card = produce_cat_relevances_dict(inputDfC,model)\n \n contReleDictListUC.append(cord)\n catReleDictListUC.append(card)\n totReleDictListUC.append(produce_total_relevances_dict(cord, card))\n \n cowrd = produce_wReleDict(cord, wC)\n cawrd = produce_wReleDict(card, wC)\n \n contWReleDictListUC.append(cowrd)\n catWReleDictListUC.append(cawrd)\n totWReleDictListUC.append(produce_total_relevances_dict(cowrd, cawrd))\n \n for model in modelsP:\n cord = produce_cont_relevances_dict(inputDfC,model)\n card = produce_cat_relevances_dict(inputDfC,model)\n \n contReleDictListPC.append(cord)\n catReleDictListPC.append(card)\n totReleDictListPC.append(produce_total_relevances_dict(cord, card))\n \n cowrd = produce_wReleDict(cord, wC)\n cawrd = produce_wReleDict(card, wC)\n \n contWReleDictListPC.append(cowrd)\n catWReleDictListPC.append(cawrd)\n totWReleDictListPC.append(produce_total_relevances_dict(cowrd, cawrd))\n \n for i in range(nrounds):\n \n #First, do everything for the uncensored df . . 
.\n \n print(\"epoch: \"+str(i+1)+\"/\"+str(nrounds))\n print('U')\n for model in modelsU:\n apply_model.explain(model)\n \n print('P')\n for model in modelsP:\n apply_model.explain(model)\n \n print(\"initial pred and effect-gathering\")\n \n predsU=[]\n overallPredU=pd.Series([0]*len(inputDf))\n contEffectsListU=[]\n catEffectsListU=[]\n \n for m in range(len(modelsU)):\n \n contEffectsU = apply_model.get_effects_of_cont_cols_from_relevance_dict(contReleDictListU[m],modelsU[m])\n contEffectsListU.append(contEffectsU)\n catEffectsU = apply_model.get_effects_of_cat_cols_from_relevance_dict(catReleDictListU[m],modelsU[m])\n catEffectsListU.append(catEffectsU)\n \n predU = apply_model.pred_from_effects(modelsU[m][\"BASE_VALUE\"], len(inputDf), contEffectsU, catEffectsU)\n predsU.append(predU)\n overallPredU += predU\n \n predsP=[]\n overallPredP=pd.Series([0]*len(inputDf))\n contEffectsListP=[]\n catEffectsListP=[]\n \n for m in range(len(modelsP)):\n \n contEffectsP = apply_model.get_effects_of_cont_cols_from_relevance_dict(contReleDictListP[m],modelsP[m])\n contEffectsListP.append(contEffectsP)\n catEffectsP = apply_model.get_effects_of_cat_cols_from_relevance_dict(catReleDictListP[m],modelsP[m])\n catEffectsListP.append(catEffectsP)\n \n predP = apply_model.pred_from_effects(modelsP[m][\"BASE_VALUE\"], len(inputDf), contEffectsP, catEffectsP)\n predsP.append(predP)\n overallPredP += predP\n \n gradientU = -gradU(np.array(inputDf[target]), overallPredU, overallPredP) #d(Loss)/d(predU)\n gradientP = -gradP(np.array(inputDf[target]), overallPredU, overallPredP) #d(Loss)/d(predP)\n \n for m in range(len(modelsU)):\n \n model=modelsU[m]\n pred=predsU[m]\n \n print(\"adjust conts\")\n \n for col in model[\"conts\"]:\n \n effectOfCol = contEffectsListU[m][col]\n \n peoc = predU/effectOfCol #d(predU)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientU),contWReleDictListU[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predU) * d(predU)/d(featpred) * d(featpred)/d(pt)\n \n for k in range(len(finalGradients)):\n totRele = totWReleDictListU[m][\"conts\"][col][k] + totWReleDictListUC[m][\"conts\"][col][k]\n if totRele>0:\n modelsU[m][\"conts\"][col][k][1] -= finalGradients[k]*lrsU[m]/totRele #and not /(sw+swC)\n \n print(\"adjust cats\")\n \n for col in model[\"cats\"]:\n \n effectOfCol = catEffectsListU[m][col]\n \n peoc = predU/effectOfCol #d(predU)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientU),catWReleDictListU[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predU) * d(predU)/d(featpred) * d(featpred)/d(pt)\n \n skeys = apply_model.get_sorted_keys(model, col)\n \n #all the uniques . . .\n for k in range(len(skeys)):\n totRele = totWReleDictListU[m][\"cats\"][col][k] + totWReleDictListUC[m][\"cats\"][col][k]\n if totRele>0:\n modelsU[m][\"cats\"][col][\"uniques\"][skeys[k]] -= finalGradients[k]*lrsU[m]/totRele #and not /(sw+swC)\n \n # . . . 
and \"OTHER\"\n totRele = totWReleDictListU[m][\"cats\"][col][-1] + totWReleDictListUC[m][\"cats\"][col][-1]\n if totRele>0:\n modelsU[m][\"cats\"][col][\"OTHER\"] -= finalGradients[-1]*lrsU[m]/totRele #and not /(sw+swC)\n \n for m in range(len(modelsP)):\n \n model=modelsP[m]\n pred=predsP[m]\n \n print(\"adjust conts\")\n \n for col in model[\"conts\"]:\n \n effectOfCol = contEffectsListP[m][col]\n \n peoc = predP/effectOfCol #d(predP)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientP),contWReleDictListP[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predP) * d(predP)/d(featpred) * d(featpred)/d(pt)\n \n for k in range(len(finalGradients)):\n totRele = totWReleDictListP[m][\"conts\"][col][k] + totWReleDictListPC[m][\"conts\"][col][k]\n if totRele>0:\n modelsP[m][\"conts\"][col][k][1] -= finalGradients[k]*lrsP[m]/totRele #and not /(sw+swC)\n \n print(\"adjust cats\")\n \n for col in model[\"cats\"]:\n \n effectOfCol = catEffectsListP[m][col]\n \n peoc = predP/effectOfCol #d(predP)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientP),catWReleDictListP[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predP) * d(predP)/d(featpred) * d(featpred)/d(pt)\n \n skeys = apply_model.get_sorted_keys(model, col)\n \n #all the uniques . . .\n for k in range(len(skeys)):\n totRele = totWReleDictListP[m][\"cats\"][col][k] + totWReleDictListPC[m][\"cats\"][col][k]\n if totRele>0:\n modelsP[m][\"cats\"][col][\"uniques\"][skeys[k]] -= finalGradients[k]*lrsP[m]/totRele #and not /(sw+swC)\n \n # . . . and \"OTHER\"\n totRele = totWReleDictListP[m][\"cats\"][col][-1] + totWReleDictListPC[m][\"cats\"][col][-1]\n if totRele>0:\n modelsP[m][\"cats\"][col][\"OTHER\"] -= finalGradients[-1]*lrsP[m]/totRele #and not /(sw+swC)\n \n # . . . and then again for the censored\n \n print(\"initial pred and effect-gathering\")\n \n predsUC=[]\n overallPredUC=pd.Series([0]*len(inputDfC))\n contEffectsListUC=[]\n catEffectsListUC=[]\n \n for m in range(len(modelsU)):\n \n contEffectsUC = apply_model.get_effects_of_cont_cols_from_relevance_dict(contReleDictListUC[m],modelsU[m])\n contEffectsListUC.append(contEffectsUC)\n catEffectsUC = apply_model.get_effects_of_cat_cols_from_relevance_dict(catReleDictListUC[m],modelsU[m])\n catEffectsListUC.append(catEffectsUC)\n \n predUC = apply_model.pred_from_effects(modelsU[m][\"BASE_VALUE\"], len(inputDfC), contEffectsUC, catEffectsUC)\n predsUC.append(predUC)\n overallPredUC += predUC\n \n predsPC=[]\n overallPredPC=pd.Series([0]*len(inputDfC))\n contEffectsListPC=[]\n catEffectsListPC=[]\n \n for m in range(len(modelsP)):\n \n contEffectsPC = apply_model.get_effects_of_cont_cols_from_relevance_dict(contReleDictListPC[m],modelsP[m])\n contEffectsListPC.append(contEffectsPC)\n catEffectsPC = apply_model.get_effects_of_cat_cols_from_relevance_dict(catReleDictListPC[m],modelsP[m])\n catEffectsListPC.append(catEffectsPC)\n \n predPC = apply_model.pred_from_effects(modelsP[m][\"BASE_VALUE\"], len(inputDfC), contEffectsPC, catEffectsPC)\n predsPC.append(predPC)\n overallPredPC += predPC\n \n gradientUC = -gradUC(np.array(inputDfC[target]), overallPredUC, overallPredPC) #d(Loss)/d(predU)\n gradientPC = -gradPC(np.array(inputDfC[target]), overallPredUC, overallPredPC) #d(Loss)/d(predP)\n \n for m in range(len(modelsU)):\n \n model=modelsU[m]\n predUC=predsUC[m]\n \n print(\"adjust conts\")\n \n for col in model[\"conts\"]:\n \n effectOfCol = contEffectsListUC[m][col]\n \n peoc = predUC/effectOfCol #d(predU)/d(featpred)\n \n finalGradients = 
np.matmul(np.array(peoc*gradientUC),contWReleDictListUC[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predU) * d(predU)/d(featpred) * d(featpred)/d(pt)\n \n for k in range(len(finalGradients)):\n totRele = totWReleDictListU[m][\"conts\"][col][k] + totWReleDictListUC[m][\"conts\"][col][k]\n if totRele>0:\n modelsU[m][\"conts\"][col][k][1] -= finalGradients[k]*lrsU[m]/totRele #and not /(sw + swC)\n \n print(\"adjust cats\")\n \n for col in model[\"cats\"]:\n \n effectOfCol = catEffectsListUC[m][col]\n \n peoc = predUC/effectOfCol #d(predU)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientUC),catWReleDictListUC[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predU) * d(predU)/d(featpred) * d(featpred)/d(pt)\n \n skeys = apply_model.get_sorted_keys(model, col)\n \n #all the uniques . . .\n for k in range(len(skeys)):\n totRele = totWReleDictListU[m][\"cats\"][col][k] + totWReleDictListUC[m][\"cats\"][col][k]\n if totRele>0:\n modelsU[m][\"cats\"][col][\"uniques\"][skeys[k]] -= finalGradients[k]*lrsU[m]/totRele #and not /(sw+swC)\n \n # . . . and \"OTHER\"\n totRele = totWReleDictListU[m][\"cats\"][col][-1] + totWReleDictListUC[m][\"cats\"][col][-1]\n if totRele>0:\n modelsU[m][\"cats\"][col][\"OTHER\"] -= finalGradients[-1]*lrsU[m]/totRele #and not /(sw+swC)\n \n for m in range(len(modelsP)):\n \n model=modelsP[m]\n predPC=predsPC[m]\n \n print(\"adjust conts\")\n \n for col in model[\"conts\"]:\n \n effectOfCol = contEffectsListPC[m][col]\n \n peoc = predPC/effectOfCol #d(predP)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientPC),contWReleDictListPC[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predP) * d(predP)/d(featpred) * d(featpred)/d(pt)\n \n for k in range(len(finalGradients)):\n totRele = totWReleDictListP[m][\"conts\"][col][k] + totWReleDictListPC[m][\"conts\"][col][k]\n if totRele>0:\n modelsP[m][\"conts\"][col][k][1] -= finalGradients[k]*lrsP[m]/totRele #and not /(sw+swC)\n \n print(\"adjust cats\")\n \n for col in model[\"cats\"]:\n \n effectOfCol = catEffectsListPC[m][col]\n \n peoc = predPC/effectOfCol #d(predP)/d(featpred)\n \n finalGradients = np.matmul(np.array(peoc*gradientPC),catWReleDictListPC[m][col]) #d(Loss)/d(pt) = d(Loss)/d(predP) * d(predP)/d(featpred) * d(featpred)/d(pt)\n \n skeys = apply_model.get_sorted_keys(model, col)\n \n #all the uniques . . .\n for k in range(len(skeys)):\n totRele = totWReleDictListP[m][\"cats\"][col][k] + totWReleDictListPC[m][\"cats\"][col][k]\n if totRele>0:\n modelsP[m][\"cats\"][col][\"uniques\"][skeys[k]] -= finalGradients[k]*lrsP[m]/totRele #and not /(sw+swC)\n \n # . . . and \"OTHER\"\n totRele = totWReleDictListP[m][\"cats\"][col][-1] + totWReleDictListPC[m][\"cats\"][col][-1]\n if totRele>0:\n modelsP[m][\"cats\"][col][\"OTHER\"] -= finalGradients[-1]*lrsP[m]/totRele #and not /(sw+swC)\n \n return modelsU, modelsP\n\nif __name__ == '__main__':\n df = pd.read_csv('gnormal.csv')\n modelsU = [{\"BASE_VALUE\":1.0,\"conts\":{\"x\":[[0,103], [7,103]]}, \"cats\":[]}]\n modelsP = [{\"BASE_VALUE\":1.0,\"conts\":{\"x\":[[0,0.2], [7,0.2]]}, \"cats\":[]}]\n cdf = df[df['censored']].reset_index()\n udf = df[~df['censored']].reset_index()\n newModelsU, newModelsP = train_model(udf, cdf, \"y\", 1000, [10], [0.005], modelsU, modelsP)\n for newModel in newModelsU:\n apply_model.explain(newModel)\n for newModel in newModelsP:\n apply_model.explain(newModel)\n \n"
] |
[
[
"numpy.matrix",
"numpy.array",
"pandas.read_csv",
"numpy.transpose"
]
] |
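The heart of the training code above is `produce_cont_relevances`, which turns each continuous feature value into linear-interpolation weights over a model's knot points (`model["conts"][col]` is a list of `[knot, value]` pairs). Below is a self-contained sketch of that step on toy data, reusing the knot layout from the `__main__` block; the input values are arbitrary.

```python
import numpy as np
import pandas as pd

def cont_relevances(x, knots):
    """Return an (n_rows, n_knots) matrix of linear-interpolation weights."""
    x = np.asarray(x, dtype=float)
    reles = np.zeros((len(knots), len(x)))
    reles[0][x <= knots[0]] = 1.0      # clamp below the first knot
    reles[-1][x >= knots[-1]] = 1.0    # clamp above the last knot
    for i in range(len(knots) - 1):
        x1, x2 = knots[i], knots[i + 1]
        inside = (x >= x1) & (x <= x2)
        reles[i][inside] = (x2 - x[inside]) / (x2 - x1)
        reles[i + 1][inside] = (x[inside] - x1) / (x2 - x1)
    return reles.T

df = pd.DataFrame({"x": [0.0, 1.5, 3.5, 7.0, 9.0]})
model = {"conts": {"x": [[0, 103], [7, 103]]}}   # [knot, value] pairs, as in __main__
knots = [pt[0] for pt in model["conts"]["x"]]
print(cont_relevances(df["x"], knots))
```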
RHammond2/ORBIT
|
[
"3bd007e9e256183ecaf691eb4138b46915f94003"
] |
[
"ORBIT/parametric.py"
] |
[
"__author__ = [\"Jake Nunemaker\"]\n__copyright__ = \"Copyright 2020, National Renewable Energy Laboratory\"\n__maintainer__ = \"Jake Nunemaker\"\n__email__ = [\"[email protected]\"]\n\n\nimport time\nfrom copy import deepcopy\nfrom random import sample\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nfrom benedict import benedict\n\nfrom ORBIT import ProjectManager\n\n\nclass ParametricManager:\n \"\"\"Class for configuring parametric ORBIT runs.\"\"\"\n\n def __init__(\n self, base, params, funcs, weather=None, module=None, product=False\n ):\n \"\"\"\n Creates an instance of `ParametricRun`.\n\n Parameters\n ----------\n base : dict\n Base ORBIT configuration.\n params : dict\n Parameters and their values.\n Format: \"subdict.param\": [num1, num2, num3]\n funcs : dict\n List of functions used to save results.\n Format: \"name\": lambda project: project.attr\n weather : DataFrame\n\n \"\"\"\n\n self.base = benedict(base)\n self.params = params\n self.funcs = funcs\n self.weather = weather\n self.results = None\n self.module = module\n self.product = product\n\n def run(self, **kwargs):\n \"\"\"Run the configured parametric runs and save any requested results to\n `self.results`.\"\"\"\n\n outputs = []\n for run in self.run_list:\n data = self._run_config(run, **kwargs)\n outputs.append(data)\n\n self.results = pd.DataFrame(outputs)\n\n def _run_config(self, run, **kwargs):\n \"\"\"Run an individual config.\"\"\"\n\n config = deepcopy(self.base)\n config.merge(run)\n\n if self.module is not None:\n project = self.module(config, weather=self.weather, **kwargs)\n project.run()\n\n else:\n project = ProjectManager(config, weather=self.weather, **kwargs)\n project.run()\n\n results = self.map_funcs(project, self.funcs)\n data = {**run, **results}\n\n return data\n\n @property\n def run_list(self):\n \"\"\"Returns list of configured parametric runs.\"\"\"\n\n if self.product:\n runs = list(product(*self.params.values()))\n\n else:\n runs = list(zip(*self.params.values()))\n\n return [dict(zip(self.params.keys(), run)) for run in runs]\n\n @property\n def num_runs(self):\n return len(self.run_list)\n\n @staticmethod\n def map_funcs(obj, funcs):\n \"\"\"\n Map `obj` to list of `funcs`.\n\n Parameters\n ----------\n obj : ProjectManager\n Project instance to run through functions.\n funcs : list\n Functions used to pull results from obj.\n \"\"\"\n\n results = {}\n for k, f in funcs.items():\n try:\n res = f(obj)\n\n except TypeError:\n raise TypeError(\n f\"Result function '{f}' not structured properly. \"\n f\"Correct format: 'lambda project: project.{f}'\"\n )\n\n except AttributeError:\n res = np.NaN\n\n results[k] = res\n\n return results\n\n def preview(self, num=10, **kwargs):\n \"\"\"\n Runs a limited set of runs to preview the results and provide an\n estimate for total run time.\n\n Parameters\n ----------\n num : int\n Number to run.\n \"\"\"\n\n start = time.time()\n outputs = []\n if num > len(self.run_list):\n to_run = self.run_list\n\n else:\n to_run = sample(self.run_list, num)\n\n for run in to_run:\n data = self._run_config(run, **kwargs)\n outputs.append(data)\n\n elapsed = time.time() - start\n estimate = (len(self.run_list) / num) * elapsed\n print(f\"{num} runs elapsed time: {elapsed:.2f}s\")\n print(f\"{self.num_runs} runs estimated time: {estimate:.2f}s\")\n\n return pd.DataFrame(outputs)\n"
] |
[
[
"pandas.DataFrame"
]
] |
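A hypothetical end-to-end sketch of `ParametricManager` following its docstrings: the base configuration is a placeholder, and the swept key `plant.num_turbines` plus the `total_capex` attribute are illustrative names rather than values checked against ORBIT's API.

```python
from ORBIT.parametric import ParametricManager

base = {}  # placeholder: a complete ORBIT project configuration goes here
params = {"plant.num_turbines": [40, 60, 80]}                  # "subdict.param": [values]
funcs = {"total_capex": lambda project: project.total_capex}   # "name": lambda project: attr

study = ParametricManager(base, params, funcs)   # zipped runs; pass product=True for a grid
print(study.num_runs)                            # -> 3
study.preview(num=2)                             # rough per-run timing on a small sample
study.run()
print(study.results)                             # one row per run: swept params + results
```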
yedrek/shrinkage
|
[
"eb9d1790045b1931404923263c87c7e40ef74baa"
] |
[
"shrinkage/rie.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom sympy import nroots, re, im, symbols, sympify\nfrom types import FunctionType\nfrom collections import defaultdict\nfrom tqdm.auto import tqdm\n\nfrom .varma import Varma\n\n\nclass LedoitPecheShrinkage:\n \"\"\"\n Given sample eigenvalues lambda_i, i = 1, ..., N, of an estimator E,\n as well as the number of samples T:\n - estimate the eigenvalue density and Hilbert transform of E\n via the (adaptive) Epanechnikov kernel\n [Ledoit, O., Wolf, M., Analytical nonlinear shrinkage of large-dimensional covariance matrices];\n - calculate the Ledoit-Peche shrunk eigenvalues xi_i, i = 1, ..., N.\n \"\"\"\n def __init__(self, lambdas, T):\n \"\"\"\n Instantiated with a 1-dimensional array of eigenvalues, lambdas, and a number of samples, T.\n Calculates:\n - N = number of eigenvalues (length of lambdas);\n - q = N/T;\n - rescaling factor \"bandwidth\" for kernel estimation; set to T^(-1/3) for each eigenvalue;\n - Epanechnikov kernel estimation of the density and Hilbert transform of lambdas;\n - intermediate and final Ledoit-Peche variables alpha_i, beta_i, u_i, xi_LP_i;\n - Epanechnikov kernel estimation of the density and Hilbert transform of xi_LP_i.\n \"\"\"\n self.lambdas = np.array(lambdas)\n self.T = T\n \n self.N = len(self.lambdas)\n self.q = self.N / self.T\n \n self.bandwidth = (self.T ** (-1/3)) * np.ones(self.N)\n\n self.name = 'Ledoit-Peche'\n \n self._calculate_epanechnikov_estimates(eigval='lambdas')\n self._calculate_LP_variables()\n self._calculate_epanechnikov_estimates(eigval='xi_LP')\n\n self.batch_idx = None\n self.N_batch = None\n\n self.chi_roots_branch = None\n self.xi_branch = None\n self.M_of_N_branch = None\n self.n_branches = None\n self.xi = None\n self.chi_roots = None\n self.chi_grads = None\n self.xi_grads = None\n\n self.xi_kernel_density = None\n self.xi_kernel_Hilbert = None\n\n\n def _calculate_epanechnikov_estimates(self, eigval='lambdas'):\n \"\"\"\n Perform Epanechnikov kernel estimation of the density and Hilbert transform\n of a given array \"eigval\", be it \"lambdas\", \"xi_LP\" or \"xi\".\n \"\"\"\n if eigval=='lambdas':\n self.lambdas_kernel_density, self.lambdas_kernel_Hilbert = __class__.epanechnikov_estimates(\n x=self.lambdas,\n bandwidth=self.bandwidth\n )\n \n elif eigval=='xi_LP':\n self.xi_LP_kernel_density, self.xi_LP_kernel_Hilbert = __class__.epanechnikov_estimates(\n x=self.xi_LP,\n bandwidth=self.bandwidth\n )\n \n elif eigval=='xi':\n if self.xi is None or len(self.xi) < self.N:\n self.predict()\n self.xi_kernel_density, self.xi_kernel_Hilbert = __class__.epanechnikov_estimates(\n x=self.xi,\n bandwidth=self.bandwidth\n )\n \n else:\n raise Exception('The argument \"eigval\" must be either \"lambdas\", \"xi_LP\", or \"xi\".')\n \n\n def _calculate_LP_variables(self):\n \"\"\"\n Calculate the intermediate variables alpha_i and beta_i,\n plus the complex numbers u_i = alpha_i + i * beta_i,\n of the Ledoit-Peche nonlinear shrinkage xi_i = xi_LP_i, which also compute here,\n of the sample eigenvalues lambda_i.\n \"\"\"\n self.alpha = self.q * (self.lambdas * self.lambdas_kernel_Hilbert - 1.)\n self.beta = np.pi * self.q * self.lambdas * self.lambdas_kernel_density\n\n self.u_range = np.array([complex(al, be) for al, be in zip(self.alpha, self.beta)])\n\n self.xi_LP = self.lambdas / ((self.alpha + 1.) 
** 2 + self.beta ** 2)\n \n\n def _set_batch_idx(self, batch_idx=None):\n \"\"\"\n Choose indices i, from 0 to (N - 1), on which to perform the calculation of xi_i.\n By default, all the indices are chosen, i.e. we calculate for every i = 0, ..., N - 1.\n \"\"\"\n self.batch_idx = list(range(self.N)) if batch_idx is None else batch_idx\n self.N_batch = len(self.batch_idx)\n\n def predict(self, batch_idx=None):\n \"\"\"\n Calculate the Ledoit-Peche nonlinear shrinkage xi_i\n of the sample eigenvalues lambda_i.\n Note: We've already calculated xi_LP_i = xi_i, so this is a simple substitution.\n A \"predict\" method is needed for consistency here and in every child class.\n \"\"\"\n self._set_batch_idx(batch_idx=batch_idx)\n self.xi = self.xi_LP[self.batch_idx]\n\n self.xi_branch = [self.xi]\n self.n_branches = len(self.xi_branch)\n \n\n def hist(\n self,\n show_lambdas=False,\n show_lambdas_density=False,\n show_lambdas_Hilbert=False,\n show_xi=False,\n show_xi_density=False,\n show_xi_Hilbert=False,\n show_xi_LP=False,\n show_xi_LP_density=False,\n show_xi_LP_Hilbert=False,\n bins=None,\n xlim=None,\n ylim=None,\n legend=True,\n savefig=None\n ):\n \"\"\"\n Plot three histograms:\n - of the sample eigenvalues lambda_i,\n - the Ledoit-Peche shrunk eigenvalues xi_LP_i,\n - and the shrunk eigenvalues xi_i,\n optionally with their Epanechnikov-kernel-estimated density,\n and/or Hilbert transforms.\n (In other words, any combination of these nine plots.)\n \"\"\"\n bns = self.N // 4 if bins is None else bins\n if show_lambdas:\n plt.hist(\n self.lambdas,\n bins=bns,\n alpha=0.5,\n color='tab:orange',\n density=True,\n label='sample eigval'\n )\n \n if show_lambdas_density:\n plt.plot(\n self.lambdas,\n self.lambdas_kernel_density,\n color='tab:red',\n label='sample eigval density'\n )\n \n if show_lambdas_Hilbert:\n plt.plot(\n self.lambdas,\n self.lambdas_kernel_Hilbert,\n color='tab:green',\n label='sample eigval Hilbert'\n )\n \n if show_xi_LP:\n plt.hist(\n self.xi_LP,\n bins=bns,\n alpha=0.5,\n color='tab:pink',\n density=True,\n label=f'Ledoit-Peche shrunk eigval'\n )\n \n if show_xi_LP_density:\n plt.plot(\n self.xi_LP,\n self.xi_LP_kernel_density,\n color='brown',\n label=f'Ledoit-Peche shrunk eigval density'\n )\n \n if show_xi_LP_Hilbert:\n plt.plot(\n self.xi_LP,\n self.xi_LP_kernel_Hilbert,\n color='tab:cyan',\n label=f'Ledoit-Peche shrunk eigval Hilbert'\n )\n \n if show_xi:\n if self.xi is None or len(self.xi) < self.N:\n self.predict()\n plt.hist(\n self.xi,\n bins=bns,\n alpha=0.5,\n color='fuchsia',\n density=True,\n label=f'{self.name} shrunk eigval'\n )\n \n if show_xi_density:\n if self.xi_kernel_density is None:\n self._calculate_epanechnikov_estimates(eigval='xi')\n plt.plot(\n self.xi,\n self.xi_kernel_density,\n color='purple',\n label=f'{self.name} shrunk eigval density'\n )\n \n if show_xi_Hilbert:\n if self.xi_kernel_Hilbert is None:\n self._calculate_epanechnikov_estimates(eigval='xi')\n plt.plot(\n self.xi,\n self.xi_kernel_Hilbert,\n color='tab:olive',\n label=f'{self.name} shrunk eigval Hilbert'\n )\n\n plt.xlim(xlim)\n plt.ylim(ylim)\n if legend:\n plt.legend()\n plt.savefig(fname=savefig) if savefig else plt.show()\n\n\n def plot(self, branch=None, xlim=None, ylim=None, legend=True, savefig=None):\n \"\"\"\n Plot the shrunk eigenvalues xi_i (vertical axis)\n versus the sample eigenvalues lambda_i (horizontal axis).\n Often, there are multiple solutions for xi_i (branches), stored in \"xi_branch\",\n and you can plot all or some of them here, besides the correct 
branch xi_i:\n set \"branch\" to either \"all\", or a string number of a branch (one-indexed),\n or a list of such string numbers.\n \"\"\"\n if self.xi is None or len(self.xi) < self.N:\n self.predict()\n \n if branch is None:\n plt.plot(\n self.lambdas,\n self.xi,\n color='purple',\n label=f'{self.name} shrunk vs. sample eigval'\n )\n\n else:\n if branch=='all':\n branches_to_plot = range(self.n_branches)\n elif isinstance(branch, list):\n branches_to_plot = [int(br) - 1 for br in branch]\n elif isinstance(branch, str):\n branches_to_plot = [int(branch) - 1]\n \n for br in branches_to_plot:\n plt.plot(\n self.lambdas,\n self.xi_branch[br],\n color=cm.hot(0.2 + 0.6 * br / self.n_branches),\n label=f'{self.name} shrunk (branch {br + 1}) vs. sample eigval'\n )\n\n plt.xlabel('lambda')\n plt.ylabel('xi')\n plt.xlim(xlim)\n plt.ylim(ylim)\n if legend:\n plt.legend()\n plt.savefig(fname=savefig) if savefig else plt.show()\n \n\n def plot_with_oracle(self, xi_oracle, show_LP=False, xlim=None, ylim=None, legend=True, savefig=None):\n \"\"\"\n Plot the shrunk eigenvalues xi_i (vertical axis)\n versus the sample eigenvalues lambda_i (horizontal axis).\n Optionally, make an analogous plot of the Ledoit-Peche xi_LP_i versus lambda_i.\n Moreover, make a scatter plot of the oracle eigenvalues xi_oracle_i versus lambda_i.\n \"\"\"\n plt.plot(\n self.lambdas,\n self.xi,\n color='purple',\n label=f'{self.name} shrunk vs. sample eigval'\n )\n if show_LP:\n plt.plot(\n self.lambdas,\n self.xi_LP,\n color='brown',\n label=f'Ledoit-Peche shrunk vs. sample eigval'\n )\n plt.scatter(\n self.lambdas,\n xi_oracle,\n marker='^',\n alpha=0.3,\n label='oracle eigval'\n )\n\n plt.xlabel('lambda')\n plt.ylabel('xi')\n plt.xlim(xlim)\n plt.ylim(ylim)\n if legend:\n plt.legend()\n plt.savefig(fname=savefig) if savefig else plt.show()\n \n\n @staticmethod\n def epanechnikov(x):\n \"\"\"\n Calculate the Epanechnikov kernel and its Hilbert transform\n at the elements of a given array x.\n \"\"\"\n assert isinstance(x, np.ndarray)\n \n y = (3 / (4 * np.sqrt(5))) * (1 - x ** 2 / 5)\n z = np.where(\n np.abs(np.abs(x) - np.sqrt(5)) < 1e-10,\n 0.,\n y * np.log(np.abs((np.sqrt(5) - x) / (np.sqrt(5) + x)))\n )\n \n kernel_density = np.maximum(y, 0)\n kernel_Hilbert = 0.3 * x - z\n \n return kernel_density, kernel_Hilbert\n \n\n @staticmethod\n def epanechnikov_estimates(x, bandwidth):\n \"\"\"\n Perform Epanechnikov-kernel estimation\n of the density and Hilbert transform of an array x\n (using the \"bandwidth\" rescaling factor).\n \"\"\"\n l1 = x * bandwidth\n l2 = np.array([(x_ - x) / l1 for x_ in x])\n l3, l4 = __class__.epanechnikov(l2)\n \n kernel_density = (l3 / l1).mean(axis=1)\n kernel_Hilbert = (l4 / l1).mean(axis=1)\n\n return kernel_density, kernel_Hilbert\n\n\nclass EwmaShrinkage(LedoitPecheShrinkage):\n \"\"\"\n Given sample eigenvalues lambda_i, i = 1, ..., N, of an estimator E,\n as well as the number of samples T,\n perform nonlinear shrinkage, computing shrunk eigenvalues xi_i,\n in the case of auto-correlations given by the EWMA model.\n \"\"\"\n def __init__(self, lambdas, T, delta):\n super().__init__(lambdas=lambdas, T=T)\n self.name = 'EWMA'\n self.delta = delta\n\n\n def predict(self):\n \"\"\"\n Calculate the EWMA nonlinear shrinkage xi_i\n of the sample eigenvalues lambda_i.\n \"\"\"\n if self.alpha is None or self.beta is None:\n super().calculate_LP_variables()\n \n ed = np.exp(self.delta)\n eda = np.exp(self.delta * self.alpha)\n \n self.xi = (\n (self.lambdas / self.beta) * eda * (ed - 1.) 
** 2 * np.sin(self.delta * self.beta)\n / self.delta\n / ((ed * eda) ** 2 + 1. - 2 * ed * eda * np.cos(self.delta * self.beta))\n )\n\n self.xi_branch = [self.xi]\n self.n_branches = len(self.xi_branch)\n\n\nclass VarmaShrinkage(LedoitPecheShrinkage, Varma):\n \"\"\"\n Given sample eigenvalues lambda_i, i = 1, ..., N, of an estimator E,\n as well as the number of samples T,\n perform nonlinear shrinkage, computing shrunk eigenvalues xi_i,\n in the case of auto-correlations given by the VARMA model.\n \"\"\"\n def __init__(self, lambdas, T):\n \"\"\"\n \"Adapt\" the model to the sample eigenvalues lambdas (and to T).\n \"\"\"\n LedoitPecheShrinkage.__init__(self, lambdas=lambdas, T=T)\n\n\n def set_params(self, **kwargs):\n \"\"\"\n Set the model's parameters: either tau or a_list and b_list,\n by initializing the parent class handling the parameters;\n it also calculates the autocorrelation matrix A.\n \"\"\"\n Varma.__init__(self, T=self.T, get_chi_equations=True, **kwargs)\n\n\n def get_params(self):\n \"\"\"\n Return a dictionary with the model's parameters.\n \"\"\"\n return {\n 'a_list': self.a_list,\n 'b_list': self.b_list\n }\n\n\n def predict(self, batch_idx=None, calculate_grads=False):\n \"\"\"\n Calculate the VARMA nonlinear shrinkage xi_i\n of the sample eigenvalues lambda_i,\n for several solvable cases.\n \"\"\"\n self._solve_chi_equation(\n batch_idx=batch_idx,\n calculate_grads=calculate_grads\n )\n \n \n def _solve_chi_equation(self, batch_idx=None, calculate_grads=False):\n \"\"\"\n For each value of u_i = alpha_i + i * beta_i, for i = 1, ..., N,\n solve an algebraic equation pol = 0 in the variable chi,\n where \"pol\" will come from a separate file with VARMA polynomials,\n with coefficients depending on u_i, as well as the model's parameters.\n Take the imaginary part of each solution,\n and collect them in branches of the shrunk eigenvalues xi_i.\n Moreover, create a list of the values of M_A(1/chi) for each u_i,\n which should be equal to u_i on the correct branch.\n (Recall, M_A(z) is the M-transform of A,\n and chi = chi_A(z) = 1/N_A(z) is the chi-transform of A.)\n \"\"\"\n self._set_batch_idx(batch_idx=batch_idx)\n\n u_batch = self.u_range[self.batch_idx]\n\n # solve the fundamental polynomial equation in chi at each u in the batch\n # (which will produce n_branches solutions at each u)\n\n chi = symbols('chi')\n\n self.chi_roots_branch = []\n chi_roots_im_branch = []\n self.M_of_N_branch = []\n\n for u in u_batch:\n chi_roots = nroots(\n self.pol(sympify(u), chi, *self.a_list, *self.b_list)\n )\n self.chi_roots_branch.append(chi_roots)\n\n chi_roots_im = [\n float(im(chi_root))\n for chi_root in chi_roots\n ]\n chi_roots_im_branch.append(chi_roots_im)\n\n M_of_N = [\n self.calculate_M_transform_A(\n z_re=float(re(1 / chi_root)),\n z_im=float(im(1 / chi_root)),\n method='eig'\n )\n for chi_root in chi_roots\n ]\n self.M_of_N_branch.append(M_of_N)\n \n # reshape to (n_branches, N_batch)\n \n self.chi_roots_branch = np.array(self.chi_roots_branch).T\n prefactor = self.lambdas[self.batch_idx] / self.beta[self.batch_idx]\n self.xi_branch = prefactor * np.array(chi_roots_im_branch).T\n self.M_of_N_branch = np.array(self.M_of_N_branch).T\n\n self.n_branches = len(self.xi_branch)\n\n # sort the branches according to xi, for convenience\n\n sort_idx = np.argsort(self.xi_branch, axis=0)\n\n self.chi_roots_branch = np.take_along_axis(self.chi_roots_branch, sort_idx, axis=0)\n self.xi_branch = np.take_along_axis(self.xi_branch, sort_idx, axis=0)\n self.M_of_N_branch = 
np.take_along_axis(self.M_of_N_branch, sort_idx, axis=0)\n\n # choose one \"good\" branch xi, by which we mean the one on which M_A(N_A(u)) = u\n # these are now 1D arrays of length N_batch\n\n sort_idx = np.argsort(np.abs(self.M_of_N_branch - u_batch), axis=0)\n\n self.chi_roots = np.take_along_axis(self.chi_roots_branch, sort_idx, axis=0)[0]\n self.xi = np.take_along_axis(self.xi_branch, sort_idx, axis=0)[0]\n \n # calculate gradients of the solution chi (also the shrunk eigenvalues xi) w.r.t. VARMA parameters\n\n if calculate_grads:\n self.chi_grads = defaultdict(list)\n for u, chi in zip(u_batch, self.chi_roots):\n pol_grad_chi = self.pol_grads['chi'](sympify(u), chi, *self.a_list, *self.b_list)\n for param in self.ab:\n self.chi_grads[param].append(\n (- self.pol_grads[param](sympify(u), chi, *self.a_list, *self.b_list) / pol_grad_chi).evalf()\n )\n self.chi_grads = dict(self.chi_grads)\n\n self.xi_grads = {}\n for param in self.ab:\n self.xi_grads[param] = prefactor * np.array([\n float(im(chi_grad)) for chi_grad in self.chi_grads[param]\n ])\n \n\n def fit(\n self,\n xi_oracle,\n loss='mse',\n loss_grad=None,\n optimizer='brute',\n **kwargs\n ):\n \"\"\"\n Find the VARMA parameters\n for which an error (specified by the loss function)\n between the shrunk eigenvalues and the given oracle eigenvalues\n is minimal.\n Use one of several provided optimization methods (\"optimizer\").\n \"\"\"\n self._set_loss(loss=loss, loss_grad=loss_grad)\n\n self.loss_list = []\n \n if optimizer=='brute':\n self.grid = kwargs.get('grid')\n\n for params_dict in tqdm(self.grid):\n self.set_params(**params_dict)\n self.predict()\n\n self.loss_list.append(\n np.mean(self.loss(xi_oracle, self.xi))\n )\n \n elif optimizer=='gd':\n lr = kwargs.get('lr')\n n_epochs = kwargs.get('n_epochs')\n N_batch = kwargs.get('N_batch')\n n_batches = int(self.N // N_batch)\n r1 = kwargs.get('r1', None)\n r2 = kwargs.get('r2', None)\n\n self._set_random_params(r1, r2)\n\n self.grid = []\n rng = np.random.default_rng()\n for epoch in range(n_epochs):\n print(f'Epoch {epoch} commencing...')\n\n for _ in tqdm(range(n_batches)):\n params_dict = self.get_params()\n\n batch_idx=rng.integers(low=0, high=self.N, size=N_batch) if N_batch < self.N else None\n\n self.predict(batch_idx=batch_idx, calculate_grads=True)\n\n self.loss_grads = [\n np.mean(\n self.loss_grad(xi_oracle[self.batch_idx], self.xi)\n * self.xi_grads[param]\n )\n for param in self.ab\n ]\n\n self.set_params(\n a_list=[\n a_prev - lr * gd\n for a_prev, gd in zip(params_dict['a_list'], self.loss_grads[:(self.r2 + 1)])\n ],\n b_list=[\n b_prev - lr * gd\n for b_prev, gd in zip(params_dict['b_list'], self.loss_grads[(self.r2 + 1):])\n ]\n )\n \n print('Making prediction on the whole dataset...')\n\n self.grid.append(params_dict)\n self.predict()\n self.loss_list.append(\n np.mean(self.loss(xi_oracle, self.xi))\n )\n\n print('... done.')\n\n idx_best = np.argmin(self.loss_list)\n self.params_dict_best = self.grid[idx_best]\n self.set_params(**self.params_dict_best)\n self.predict(calculate_grads=True)\n self.loss_best = self.loss_list[idx_best]\n self.loss_LP = np.mean(self.loss(xi_oracle, self.xi_LP))\n \n\n def _set_loss(self, loss, loss_grad=None):\n \"\"\"\n Set the loss function (and its gradient w.r.t. 
xi_pred),\n being a function of xi_true and xi_pred,\n based on the \"loss\" argument.\n \"\"\"\n if type(loss)==FunctionType:\n self.loss = loss\n self.loss_grad = loss_grad\n elif loss=='mse':\n self.loss = lambda xi_true, xi_pred: (xi_true - xi_pred) ** 2\n self.loss_grad = lambda xi_true, xi_pred: -2 * (xi_true - xi_pred)\n else:\n raise Exception('Unknown error function.')\n \n\n def _set_random_params(self, r1=None, r2=None):\n rng = np.random.default_rng()\n if r1 is None and r2 is None:\n self.set_params(\n tau=rng.random()\n )\n else:\n eps = 0.1\n random_list = list(eps * rng.random(size=(r1 + r2 + 1)))\n self.set_params(\n a_list=[1. - random_list[0]] + random_list[1:(r2 + 1)],\n b_list=random_list[(r2 + 1):]\n )"
] |
[
[
"numpy.take_along_axis",
"matplotlib.pyplot.legend",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.argmin",
"numpy.exp",
"numpy.random.default_rng",
"numpy.sin",
"matplotlib.cm.hot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.argsort",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.maximum",
"numpy.abs",
"matplotlib.pyplot.scatter",
"numpy.cos",
"numpy.ones",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel"
]
] |
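A minimal synthetic-data sketch of the `LedoitPecheShrinkage` class above: build a sample covariance matrix from i.i.d. observations, feed its eigenvalues and the sample count to the class, and read off the shrunk eigenvalues. The dimensions and random seed are arbitrary.

```python
import numpy as np
from shrinkage.rie import LedoitPecheShrinkage

N, T = 200, 600
rng = np.random.default_rng(0)
X = rng.standard_normal((T, N))        # i.i.d. data, so the true covariance is the identity
E = X.T @ X / T                        # sample covariance estimator
lambdas = np.linalg.eigvalsh(E)        # its N sample eigenvalues

lp = LedoitPecheShrinkage(lambdas, T)  # kernel density/Hilbert estimates computed at init
lp.predict()                           # for this class, xi is simply xi_LP
print(f"q = {lp.q:.2f}")
print("smallest shrunk eigenvalues:", lp.xi[:3])
print("largest shrunk eigenvalues:", lp.xi[-3:])
```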
JiehangXie/PASSL
|
[
"a4835064fb830f04201b418b1c09d5b1bf813150"
] |
[
"passl/datasets/preprocess/transforms.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nfrom PIL import ImageFilter, Image, ImageOps\nimport numpy as np\n\nimport paddle\n\nimport paddle.vision.transforms as PT\nimport paddle.vision.transforms.functional as F\nfrom .cv2_trans import ByolRandomHorizontalFlip, ByolColorJitter, ByolRandomGrayscale, ByolNormalize,ToCHW,ByolToRGB,ByolCenterCrop, ByolRandomCrop\nfrom .builder import TRANSFORMS, build_transform\n\nTRANSFORMS.register(PT.RandomResizedCrop)\nTRANSFORMS.register(PT.ColorJitter)\nTRANSFORMS.register(PT.Transpose)\nTRANSFORMS.register(PT.Normalize)\nTRANSFORMS.register(PT.RandomHorizontalFlip)\nTRANSFORMS.register(PT.Resize)\nTRANSFORMS.register(PT.CenterCrop)\nTRANSFORMS.register(PT.ToTensor)\n\nTRANSFORMS.register(ByolRandomHorizontalFlip)\nTRANSFORMS.register(ByolColorJitter)\nTRANSFORMS.register(ByolRandomGrayscale)\nTRANSFORMS.register(ByolNormalize)\nTRANSFORMS.register(ToCHW)\nTRANSFORMS.register(ByolToRGB)\nTRANSFORMS.register(ByolRandomCrop)\nTRANSFORMS.register(ByolCenterCrop)\n\n\[email protected]()\nclass Clip():\n def __init__(self,min_val=0.0,max_val=1.0):\n self.min_val = min_val\n self.max_val = max_val\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Cliped image.\n \"\"\"\n clip_img = img.clip(self.min_val,self.max_val)\n return clip_img\n\n\[email protected]()\nclass NormToOne():\n def __init__(self):\n pass\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n norm_img = (img/255.).astype('float32')\n return norm_img\n\n\[email protected]()\nclass RandomApply():\n \"\"\"Apply randomly a list of transformations with a given probability\n\n Args:\n transforms (list or tuple): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n _transforms = []\n if isinstance(transforms, (list, tuple)):\n for transform in transforms:\n if isinstance(transform, dict):\n _transforms.append(build_transform(transform))\n else:\n _transforms.append(transform)\n\n self.transforms = _transforms\n self.p = p\n\n def __call__(self, img):\n if self.p < random.random():\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n\[email protected]()\nclass RandomGrayscale(object):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n self.p = p\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be converted to 
grayscale.\n\n Returns:\n PIL Image: Randomly grayscaled image.\n \"\"\"\n num_output_channels = 1 if img.mode == 'L' else 3\n\n if random.random()< self.p:\n return F.to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n\[email protected]()\nclass GaussianBlur(object):\n \"\"\"Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\"\"\"\n\n def __init__(self, sigma=[.1, 2.], _PIL=False):\n self.sigma = sigma\n self.kernel_size = 23\n self._PIL = _PIL\n\n def __call__(self, x):\n sigma = np.random.uniform(self.sigma[0], self.sigma[1])\n if self._PIL: \n x = x.filter(ImageFilter.GaussianBlur(radius=sigma))\n return x\n else: \n import cv2\n x = cv2.GaussianBlur(np.array(x), (self.kernel_size, self.kernel_size), sigma)\n return Image.fromarray(x.astype(np.uint8))\n \n\[email protected]()\nclass Solarization(object):\n def __init__(self, threshold=128):\n self.threshold = threshold\n\n def __call__(self, sample):\n return ImageOps.solarize(sample, self.threshold)\n\n\[email protected]()\nclass ToRGB(object):\n def __init__(self, mode='RGB'):\n self.mode = mode\n\n def __call__(self, sample):\n if sample.mode != self.mode:\n return sample.convert(self.mode)\n return sample\n"
] |
[
[
"numpy.random.uniform",
"numpy.array"
]
] |
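An illustrative sketch (not taken from the repository) of composing the custom transforms registered above on a PIL image; the image content, sigma range, and probability are arbitrary, and the PIL branch of `GaussianBlur` is used so cv2 is not required.

```python
import numpy as np
from PIL import Image
from passl.datasets.preprocess.transforms import (
    GaussianBlur, RandomApply, Solarization, ToRGB,
)

img = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))

augment = RandomApply(
    transforms=[
        ToRGB(),                                     # no-op here, the input is already RGB
        GaussianBlur(sigma=[0.1, 2.0], _PIL=True),   # PIL ImageFilter branch
        Solarization(threshold=128),
    ],
    p=0.5,                                           # apply the whole chain half the time
)

out = augment(img)
print(out.size, out.mode)
```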
Syakyr/seldon-core
|
[
"938a8ffd19fc321eed4c7347ef56cc0b59f448dd"
] |
[
"python/tests/test_runtime_metrics_tags.py"
] |
[
"import os\nimport logging\nimport pytest\nimport numpy as np\nfrom google.protobuf import json_format\nimport json\n\nfrom seldon_core.flask_utils import SeldonMicroserviceException\nfrom seldon_core.proto import prediction_pb2\nfrom seldon_core.wrapper import (\n get_rest_microservice,\n get_metrics_microservice,\n SeldonModelGRPC,\n)\nfrom seldon_core.metrics import (\n SeldonMetrics,\n create_counter,\n create_gauge,\n create_timer,\n split_image_tag,\n validate_metrics,\n COUNTER,\n BINS,\n FEEDBACK_METRIC_METHOD_TAG,\n PREDICT_METRIC_METHOD_TAG,\n INPUT_TRANSFORM_METRIC_METHOD_TAG,\n OUTPUT_TRANSFORM_METRIC_METHOD_TAG,\n ROUTER_METRIC_METHOD_TAG,\n AGGREGATE_METRIC_METHOD_TAG,\n HEALTH_METRIC_METHOD_TAG,\n)\nfrom seldon_core.user_model import client_custom_metrics, SeldonResponse\n\n\nRUNTIME_METRICS = [\n {\"type\": \"GAUGE\", \"key\": \"runtime_gauge\", \"value\": 42},\n]\n\nRUNTIME_TAGS = {\"runtime\": \"tag\", \"shared\": \"right one\"}\nEXPECTED_TAGS = {\"static\": \"tag\", **RUNTIME_TAGS}\n\n\nclass UserObject:\n def predict(self, X, features_names):\n logging.info(\"Predict called\")\n return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)\n\n def aggregate(self, X, features_names):\n logging.info(\"Aggregate called\")\n return SeldonResponse(data=X[0], metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)\n\n def transform_input(self, X, feature_names):\n logging.info(\"Transform input called\")\n return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)\n\n def transform_output(self, X, feature_names):\n logging.info(\"Transform output called\")\n return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)\n\n def route(self, X, feature_names):\n logging.info(\"Route called\")\n return SeldonResponse(data=22, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)\n\n def send_feedback(self, X, feature_names, reward, truth, routing):\n return SeldonResponse(data=X, metrics=RUNTIME_METRICS, tags=RUNTIME_TAGS)\n\n def metrics(self):\n logging.info(\"Metrics called\")\n return [\n {\"type\": \"COUNTER\", \"key\": \"mycounter\", \"value\": 1},\n {\"type\": \"GAUGE\", \"key\": \"mygauge\", \"value\": 100},\n {\"type\": \"TIMER\", \"key\": \"mytimer\", \"value\": 20.2},\n {\n \"type\": \"GAUGE\",\n \"key\": \"customtag\",\n \"value\": 200,\n \"tags\": {\"mytag\": \"mytagvalue\"},\n },\n ]\n\n def tags(self):\n return {\"static\": \"tag\", \"shared\": \"not right one\"}\n\n\ndef verify_seldon_metrics(data, mycounter_value, histogram_entries, method):\n assert data[\"GAUGE\", \"runtime_gauge\"][\"value\"] == 42\n assert data[\"GAUGE\", \"mygauge\"][\"value\"] == 100\n assert data[\"GAUGE\", \"customtag\"][\"value\"] == 200\n assert data[\"GAUGE\", \"customtag\"][\"tags\"] == {\n \"mytag\": \"mytagvalue\",\n \"method\": method,\n }\n assert data[\"COUNTER\", \"mycounter\"][\"value\"] == mycounter_value\n assert np.allclose(\n np.histogram(histogram_entries, BINS)[0], data[\"TIMER\", \"mytimer\"][\"value\"][0]\n )\n assert np.allclose(data[\"TIMER\", \"mytimer\"][\"value\"][1], np.sum(histogram_entries))\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_runtime_data_predict(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n rv = client.get('/predict?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}')\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n 
assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], PREDICT_METRIC_METHOD_TAG)\n\n rv = client.get('/predict?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}')\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], PREDICT_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_runtime_data_send_feedback(cls):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n rv = client.get('/send-feedback?json={\"reward\": 42}')\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], FEEDBACK_METRIC_METHOD_TAG)\n\n assert data[\"COUNTER\", \"seldon_api_model_feedback_reward\"] == {\n \"value\": 42.0,\n \"tags\": {\"method\": FEEDBACK_METRIC_METHOD_TAG},\n }\n\n rv = client.get('/send-feedback?json={\"reward\": 42}')\n assert rv.status_code == 200\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], FEEDBACK_METRIC_METHOD_TAG)\n\n assert data[\"COUNTER\", \"seldon_api_model_feedback_reward\"] == {\n \"value\": 84.0,\n \"tags\": {\"method\": FEEDBACK_METRIC_METHOD_TAG},\n }\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_runtime_data_aggregate(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n rv = client.get(\n '/aggregate?json={\"seldonMessages\": [{\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}]}'\n )\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], AGGREGATE_METRIC_METHOD_TAG)\n\n rv = client.get(\n '/aggregate?json={\"seldonMessages\": [{\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}]}'\n )\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], AGGREGATE_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_runtime_data_transform_input(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n rv = client.get(\n '/transform-input?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}'\n )\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, 
[0.0202], INPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n rv = client.get(\n '/transform-input?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}'\n )\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], INPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_runtime_data_transform_output(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n rv = client.get(\n '/transform-output?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}'\n )\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], OUTPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n rv = client.get(\n '/transform-output?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}'\n )\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [\"data\"]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], OUTPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_runtime_data_route(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n rv = client.get('/route?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}')\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [[22]]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], ROUTER_METRIC_METHOD_TAG)\n\n rv = client.get('/route?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}')\n assert rv.status_code == 200\n j = json.loads(rv.data)\n assert j[\"data\"][\"ndarray\"] == [[22]]\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], ROUTER_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_proto_seldon_runtime_data_predict(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = SeldonModelGRPC(user_object, seldon_metrics)\n datadef = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=np.array([1, 2]))\n )\n\n request = prediction_pb2.SeldonMessage(data=datadef)\n\n resp = app.Predict(request, None)\n\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n 
verify_seldon_metrics(data, 1, [0.0202], PREDICT_METRIC_METHOD_TAG)\n resp = app.Predict(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], PREDICT_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_proto_seldon_runtime_data_aggregate(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = SeldonModelGRPC(user_object, seldon_metrics)\n\n arr1 = np.array([1, 2])\n datadef1 = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr1)\n )\n arr2 = np.array([3, 4])\n datadef2 = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=arr2)\n )\n msg1 = prediction_pb2.SeldonMessage(data=datadef1)\n msg2 = prediction_pb2.SeldonMessage(data=datadef2)\n\n request = prediction_pb2.SeldonMessageList(seldonMessages=[msg1, msg2])\n\n resp = app.Aggregate(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], AGGREGATE_METRIC_METHOD_TAG)\n\n resp = app.Aggregate(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], AGGREGATE_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_proto_seldon_runtime_data_transform_input(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = SeldonModelGRPC(user_object, seldon_metrics)\n datadef = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=np.array([1, 2]))\n )\n\n request = prediction_pb2.SeldonMessage(data=datadef)\n\n resp = app.TransformInput(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], INPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n resp = app.TransformInput(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], INPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_proto_seldon_runtime_data_transform_output(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = SeldonModelGRPC(user_object, 
seldon_metrics)\n datadef = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=np.array([1, 2]))\n )\n\n request = prediction_pb2.SeldonMessage(data=datadef)\n\n resp = app.TransformOutput(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], OUTPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n resp = app.TransformOutput(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], OUTPUT_TRANSFORM_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_proto_seldon_runtime_data_route(cls, client_gets_metrics):\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = SeldonModelGRPC(user_object, seldon_metrics)\n datadef = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=np.array([1, 2]))\n )\n\n request = prediction_pb2.SeldonMessage(data=datadef)\n resp = app.Route(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [1, 1], \"values\": [22.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 1, [0.0202], ROUTER_METRIC_METHOD_TAG)\n resp = app.Route(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [1, 1], \"values\": [22.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n\n data = seldon_metrics.data[os.getpid()]\n verify_seldon_metrics(data, 2, [0.0202, 0.0202], ROUTER_METRIC_METHOD_TAG)\n\n\[email protected](\"cls\", [UserObject])\ndef test_seldon_metrics_endpoint(cls, client_gets_metrics):\n def _match_label(line):\n _data, value = line.split()\n name, labels = _data.split()[0].split(\"{\")\n labels = labels[:-1]\n return name, value, eval(f\"dict({labels})\")\n\n def _iterate_metrics(text):\n for line in text.split(\"\\n\"):\n if not line or line[0] == \"#\":\n continue\n yield _match_label(line)\n\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = get_rest_microservice(user_object, seldon_metrics)\n client = app.test_client()\n\n metrics_app = get_metrics_microservice(seldon_metrics)\n metrics_client = metrics_app.test_client()\n\n rv = metrics_client.get(\"/metrics\")\n assert rv.status_code == 200\n assert rv.data.decode() == \"\"\n\n rv = client.get('/predict?json={\"data\": {\"names\": [\"input\"], \"ndarray\": [\"data\"]}}')\n assert rv.status_code == 200\n assert (\"metrics\" in json.loads(rv.data)[\"meta\"]) == client_gets_metrics\n\n rv = metrics_client.get(\"/metrics\")\n text = rv.data.decode()\n\n timer_present = False\n for name, value, labels in _iterate_metrics(text):\n if name == \"mytimer_bucket\":\n timer_present = True\n\n if name == \"mycounter_total\":\n assert value == 
\"1.0\"\n assert labels[\"worker_id\"] == str(os.getpid())\n\n if name == \"mygauge\":\n assert value == \"100.0\"\n assert labels[\"worker_id\"] == str(os.getpid())\n\n if name == \"customtag\":\n assert value == \"200.0\"\n assert labels[\"mytag\"] == \"mytagvalue\"\n\n assert timer_present\n\n\[email protected](\"cls\", [UserObject])\ndef test_proto_seldon_metrics_endpoint(cls, client_gets_metrics):\n def _match_label(line):\n _data, value = line.split()\n name, labels = _data.split()[0].split(\"{\")\n labels = labels[:-1]\n return name, value, eval(f\"dict({labels})\")\n\n def _iterate_metrics(text):\n for line in text.split(\"\\n\"):\n if not line or line[0] == \"#\":\n continue\n yield _match_label(line)\n\n user_object = cls()\n seldon_metrics = SeldonMetrics()\n\n app = SeldonModelGRPC(user_object, seldon_metrics)\n datadef = prediction_pb2.DefaultData(\n tensor=prediction_pb2.Tensor(shape=(2, 1), values=np.array([1, 2]))\n )\n\n request = prediction_pb2.SeldonMessage(data=datadef)\n\n metrics_app = get_metrics_microservice(seldon_metrics)\n metrics_client = metrics_app.test_client()\n\n rv = metrics_client.get(\"/metrics\")\n assert rv.status_code == 200\n assert rv.data.decode() == \"\"\n\n resp = app.Predict(request, None)\n j = json.loads(json_format.MessageToJson(resp))\n assert j[\"data\"] == {\n \"names\": [\"t:0\"],\n \"tensor\": {\"shape\": [2, 1], \"values\": [1.0, 2.0]},\n }\n assert j[\"meta\"][\"tags\"] == EXPECTED_TAGS\n assert (\"metrics\" in j[\"meta\"]) == client_gets_metrics\n rv = metrics_client.get(\"/metrics\")\n text = rv.data.decode()\n\n timer_present = False\n for name, value, labels in _iterate_metrics(text):\n if name == \"mytimer_bucket\":\n timer_present = True\n\n if name == \"mycounter_total\":\n assert value == \"1.0\"\n assert labels[\"worker_id\"] == str(os.getpid())\n\n if name == \"mygauge\":\n assert value == \"100.0\"\n assert labels[\"worker_id\"] == str(os.getpid())\n\n if name == \"customtag\":\n assert value == \"200.0\"\n assert labels[\"mytag\"] == \"mytagvalue\"\n\n assert timer_present\n"
] |
[
[
"numpy.array",
"numpy.histogram",
"numpy.sum"
]
] |
federicovergallo/maskrcnn-benchmark
|
[
"5c7151415e252f210cb8fe93e12d52e32b8c7066"
] |
[
"demo/predictor.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport cv2\nimport torch\nfrom torchvision import transforms as T\nfrom torchvision.transforms import functional as F\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\nfrom maskrcnn_benchmark import layers as L\nfrom maskrcnn_benchmark.utils import cv2_util\n\nclass Resize(object):\n def __init__(self, min_size, max_size):\n self.min_size = min_size\n self.max_size = max_size\n\n # modified from torchvision to add support for max size\n def get_size(self, image_size):\n w, h = image_size\n size = self.min_size\n max_size = self.max_size\n if max_size is not None:\n min_original_size = float(min((w, h)))\n max_original_size = float(max((w, h)))\n if max_original_size / min_original_size * size > max_size:\n size = int(round(max_size * min_original_size / max_original_size))\n\n if (w <= h and w == size) or (h <= w and h == size):\n return (h, w)\n\n if w < h:\n ow = size\n oh = int(size * h / w)\n else:\n oh = size\n ow = int(size * w / h)\n\n return (oh, ow)\n\n def __call__(self, image):\n size = self.get_size(image.size)\n image = F.resize(image, size)\n return image\nclass COCODemo(object):\n # COCO categories for pretty print\n CATEGORIES = [\n \"__background\",\n \"person\",\n \"bicycle\",\n \"car\",\n \"motorcycle\",\n \"airplane\",\n \"bus\",\n \"train\",\n \"truck\",\n \"boat\",\n \"traffic light\",\n \"fire hydrant\",\n \"stop sign\",\n \"parking meter\",\n \"bench\",\n \"bird\",\n \"cat\",\n \"dog\",\n \"horse\",\n \"sheep\",\n \"cow\",\n \"elephant\",\n \"bear\",\n \"zebra\",\n \"giraffe\",\n \"backpack\",\n \"umbrella\",\n \"handbag\",\n \"tie\",\n \"suitcase\",\n \"frisbee\",\n \"skis\",\n \"snowboard\",\n \"sports ball\",\n \"kite\",\n \"baseball bat\",\n \"baseball glove\",\n \"skateboard\",\n \"surfboard\",\n \"tennis racket\",\n \"bottle\",\n \"wine glass\",\n \"cup\",\n \"fork\",\n \"knife\",\n \"spoon\",\n \"bowl\",\n \"banana\",\n \"apple\",\n \"sandwich\",\n \"orange\",\n \"broccoli\",\n \"carrot\",\n \"hot dog\",\n \"pizza\",\n \"donut\",\n \"cake\",\n \"chair\",\n \"couch\",\n \"potted plant\",\n \"bed\",\n \"dining table\",\n \"toilet\",\n \"tv\",\n \"laptop\",\n \"mouse\",\n \"remote\",\n \"keyboard\",\n \"cell phone\",\n \"microwave\",\n \"oven\",\n \"toaster\",\n \"sink\",\n \"refrigerator\",\n \"book\",\n \"clock\",\n \"vase\",\n \"scissors\",\n \"teddy bear\",\n \"hair drier\",\n \"toothbrush\",\n ]\n\n def __init__(\n self,\n cfg,\n confidence_threshold=0.7,\n show_mask_heatmaps=False,\n masks_per_dim=2,\n min_image_size=224,\n weight_loading = None\n ):\n self.cfg = cfg.clone()\n self.model = build_detection_model(cfg)\n self.model.eval()\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.model.to(self.device)\n self.min_image_size = min_image_size\n\n save_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n \n if weight_loading:\n print('Loading weight from {}.'.format(weight_loading))\n _ = checkpointer._load_model(torch.load(weight_loading))\n \n self.transforms = self.build_transform()\n\n mask_threshold = -1 if show_mask_heatmaps else 0.5\n self.masker = Masker(threshold=mask_threshold, padding=1)\n\n # used to make colors for each class\n self.palette = 
torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n\n self.cpu_device = torch.device(\"cpu\")\n self.confidence_threshold = confidence_threshold\n self.show_mask_heatmaps = show_mask_heatmaps\n self.masks_per_dim = masks_per_dim\n\n def build_transform(self):\n \"\"\"\n Creates a basic transformation that was used to train the models\n \"\"\"\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n min_size = cfg.INPUT.MIN_SIZE_TEST\n max_size = cfg.INPUT.MAX_SIZE_TEST\n transform = T.Compose(\n [\n T.ToPILImage(),\n Resize(min_size, max_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform\n\n def run_on_opencv_image(self, image):\n \"\"\"\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n predictions = self.compute_prediction(image)\n top_predictions = self.select_top_predictions(predictions)\n \n result = image.copy()\n if self.show_mask_heatmaps:\n return self.create_mask_montage(result, top_predictions)\n result = self.overlay_boxes(result, top_predictions)\n if self.cfg.MODEL.MASK_ON:\n result = self.overlay_mask(result, top_predictions)\n if self.cfg.MODEL.KEYPOINT_ON:\n result = self.overlay_keypoints(result, top_predictions)\n result = self.overlay_class_names(result, top_predictions)\n\n return result\n\n def compute_prediction(self, original_image):\n \"\"\"\n Arguments:\n original_image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. 
Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n # apply pre-processing to image\n image = self.transforms(original_image)\n \n # convert to an ImageList, padded so that it is divisible by\n # cfg.DATALOADER.SIZE_DIVISIBILITY\n image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n image_list = image_list.to(self.device)\n \n # compute predictions\n with torch.no_grad():\n predictions = self.model(image_list)\n predictions = [o.to(self.cpu_device) for o in predictions]\n # always single image is passed at a time\n prediction = predictions[0]\n\n # reshape prediction (a BoxList) into the original image size\n height, width = original_image.shape[:-1]\n prediction = prediction.resize((width, height))\n\n if prediction.has_field(\"mask\"):\n # if we have masks, paste the masks in the right position\n # in the image, as defined by the bounding boxes\n masks = prediction.get_field(\"mask\")\n # always single image is passed at a time\n masks = self.masker([masks], [prediction])[0]\n prediction.add_field(\"mask\", masks)\n return prediction\n\n def select_top_predictions(self, predictions):\n \"\"\"\n Select only predictions which have a `score` > self.confidence_threshold,\n and returns the predictions in descending order of score\n\n Arguments:\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores`.\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n # Remove not in our interest labels\n labels = predictions.extra_fields['labels']\n labels_interest = ((labels == 1) | (labels == 2) | (labels == 3))\n keep = torch.nonzero(labels_interest != 0).squeeze(1)\n predictions = predictions[keep]\n\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]\n\n def compute_colors_for_labels(self, labels):\n \"\"\"\n Simple function that adds fixed colors depending on the class\n \"\"\"\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors\n\n def overlay_boxes(self, image, predictions):\n \"\"\"\n Adds the predicted boxes on top of the image\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `labels`.\n \"\"\"\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image\n\n def overlay_mask(self, image, predictions):\n \"\"\"\n Adds the instances contours for each predicted object.\n Each label has a different color.\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask` and `labels`.\n \"\"\"\n masks = predictions.get_field(\"mask\").numpy()\n labels = predictions.get_field(\"labels\")\n\n colors = 
self.compute_colors_for_labels(labels).tolist()\n\n for mask, color in zip(masks, colors):\n thresh = mask[0, :, :, None].astype(np.uint8)\n contours, hierarchy = cv2_util.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n image = cv2.drawContours(image, contours, -1, color, 3)\n\n composite = image\n\n return composite\n\n def overlay_keypoints(self, image, predictions):\n keypoints = predictions.get_field(\"keypoints\")\n kps = keypoints.keypoints\n scores = keypoints.get_field(\"logits\")\n kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n for region in kps:\n image = vis_keypoints(image, region.transpose((1, 0)))\n return image\n\n def create_mask_montage(self, image, predictions):\n \"\"\"\n Create a montage showing the probability heatmaps for each one one of the\n detected objects\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask`.\n \"\"\"\n masks = predictions.get_field(\"mask\")\n masks_per_dim = self.masks_per_dim\n masks = L.interpolate(\n masks.float(), scale_factor=1 / masks_per_dim\n ).byte()\n height, width = masks.shape[-2:]\n max_masks = masks_per_dim ** 2\n masks = masks[:max_masks]\n # handle case where we have less detections than max_masks\n if len(masks) < max_masks:\n masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n masks_padded[: len(masks)] = masks\n masks = masks_padded\n masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n result = torch.zeros(\n (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n )\n for y in range(masks_per_dim):\n start_y = y * height\n end_y = (y + 1) * height\n for x in range(masks_per_dim):\n start_x = x * width\n end_x = (x + 1) * width\n result[start_y:end_y, start_x:end_x] = masks[y, x]\n return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n\n def overlay_class_names(self, image, predictions):\n \"\"\"\n Adds detected class names and scores in the positions defined by the\n top-left corner of the predicted bounding box\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores` and `labels`.\n \"\"\"\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels = [self.CATEGORIES[i] for i in labels]\n boxes = predictions.bbox\n\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n )\n\n return image\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n\ndef vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n \"\"\"Visualizes keypoints (adapted from vis_one_image).\n kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n \"\"\"\n dataset_keypoints = PersonKeypoints.NAMES\n kp_lines = PersonKeypoints.CONNECTIONS\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n\n # Perform the drawing on a copy of the image, to allow for blending.\n kp_mask = np.copy(img)\n\n # Draw mid shoulder / mid hip first for better 
visualization.\n mid_shoulder = (\n kps[:2, dataset_keypoints.index('right_shoulder')] +\n kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n sc_mid_shoulder = np.minimum(\n kps[2, dataset_keypoints.index('right_shoulder')],\n kps[2, dataset_keypoints.index('left_shoulder')])\n mid_hip = (\n kps[:2, dataset_keypoints.index('right_hip')] +\n kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n sc_mid_hip = np.minimum(\n kps[2, dataset_keypoints.index('right_hip')],\n kps[2, dataset_keypoints.index('left_hip')])\n nose_idx = dataset_keypoints.index('nose')\n if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n\n # Draw the keypoints.\n for l in range(len(kp_lines)):\n i1 = kp_lines[l][0]\n i2 = kp_lines[l][1]\n p1 = kps[0, i1], kps[1, i1]\n p2 = kps[0, i2], kps[1, i2]\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n cv2.line(\n kp_mask, p1, p2,\n color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n if kps[2, i1] > kp_thresh:\n cv2.circle(\n kp_mask, p1,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n if kps[2, i2] > kp_thresh:\n cv2.circle(\n kp_mask, p2,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n\n # Blend the keypoints.\n return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as pylab\nimport requests\nfrom io import BytesIO\nfrom PIL import Image\nimport numpy as np\n# this makes our figures bigger\npylab.rcParams['figure.figsize'] = 20, 12\nfrom maskrcnn_benchmark.config import cfg\n\n\ndef imshow(img):\n plt.imshow(img[:, :, [2, 1, 0]])\n plt.axis(\"off\")\n\n\nif __name__ == '__main__':\n config_file = \"../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml\"\n\n # update the config options with the config file\n cfg.merge_from_file(config_file)\n # manual override some options\n cfg.merge_from_list([\"MODEL.DEVICE\", \"cpu\"])\n\n # COCOdemo object\n coco_demo = COCODemo(\n cfg,\n min_image_size=720,\n confidence_threshold=0.7,\n )\n # Read images\n image_1 = cv2.imread('frame_10.jpg')\n predictions = coco_demo.run_on_opencv_image(image_1)\n imshow(predictions)\n"
] |
[
[
"matplotlib.pyplot.imshow",
"torch.zeros",
"torch.load",
"torch.cat",
"matplotlib.pyplot.get_cmap",
"torch.tensor",
"numpy.copy",
"torch.no_grad",
"torch.nonzero",
"matplotlib.pyplot.axis",
"torch.device"
]
] |
alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python
|
[
"bc27fa585d22d630a38312ee7c4b2173d5b80d12"
] |
[
"part05-e01_split_date_continues/test/test_split_date_continues.py"
] |
[
"#!/usr/bin/env python3\n\nimport unittest\nfrom unittest.mock import patch\nimport numpy as np\nimport pandas as pd\n\nfrom tmc import points\n\nfrom tmc.utils import load, get_stdout, patch_helper\n\nmodule_name=\"src.split_date_continues\"\nsplit_date_continues = load(module_name, \"split_date_continues\")\nmain = load(module_name, \"main\")\nph = patch_helper(module_name)\n\n@points('p05-01.1')\nclass SplitDateContinues(unittest.TestCase):\n\n # @classmethod\n # def setUpClass(cls):\n # cls.df = split_date_continues()\n\n def setUp(self):\n self.df = split_date_continues()\n \n def test_shape(self):\n self.assertEqual(self.df.shape, (37128, 25), msg=\"Incorrect shape!\")\n\n def test_columns(self):\n np.testing.assert_array_equal(self.df.columns[:6],\n ['Weekday', 'Day', 'Month', 'Year', 'Hour', 'Auroransilta'],\n err_msg=\"First six column names were incorrect!\")\n\n def test_dtypes(self):\n np.testing.assert_array_equal(self.df.dtypes[:6],\n [object, int, int, int, int, float],\n err_msg=\"Incorrect column types in first six columns!\")\n\n def test_content(self):\n value = self.df.loc[0, \"Auroransilta\"]\n self.assertTrue(np.isnan(value),\n msg=\"Incorrect value on row 0 column Auroransilta, expected NaN got %f!\" % value)\n self.assertEqual(self.df.loc[0, \"Baana\"], 8.0,\n msg=\"Incorrect value on row 0 column Baana!\")\n \n def test_calls(self):\n with patch(ph(\"split_date_continues\"), wraps=split_date_continues) as psplit,\\\n patch(ph(\"pd.read_csv\"), wraps=pd.read_csv) as prc,\\\n patch(ph(\"pd.concat\"), wraps=pd.concat) as pconcat:\n main()\n psplit.assert_called_once()\n prc.assert_called_once()\n pconcat.assert_called()\n\nif __name__ == '__main__':\n unittest.main()\n \n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.isnan"
]
] |
yookyungkho/Hand-to-Hand
|
[
"c0ae9a9c1e5ea894c7daf2c2bf2f7c752e31e8b7"
] |
[
"stylegan2_basic/dataset/get_tfrecords.py"
] |
[
"import os\nimport glob\nimport tensorflow as tf\n\n\ndef parse_tfrecord(raw_record, res):\n feature_description = {\n 'label': tf.io.FixedLenFeature([259], tf.float32),\n 'image': tf.io.FixedLenFeature([], tf.string, default_value=''),\n }\n\n # parse feature\n parsed = tf.io.parse_single_example(raw_record, feature_description)\n\n # labels\n labels = tf.reshape(parsed['label'], shape=[259])\n\n # image\n image = tf.io.decode_png(parsed['image'])\n image = tf.image.resize(image, size=[res, res])\n image = tf.transpose(image, perm=[2, 0, 1])\n image = tf.cast(image, dtype=tf.dtypes.float32)\n image = image / 127.5 - 1.0\n image.set_shape([3, res, res])\n return image, labels\n\n\ndef input_fn(filenames, res, batch_size, epochs):\n files = tf.data.Dataset.from_tensor_slices(filenames)\n dataset = files.interleave(lambda x: tf.data.TFRecordDataset(x),\n # cycle_length: number of files to read concurrently\n # If you want to read from all your files(*.tfrecord) to create a batch,\n # set this to the number of files\n cycle_length=len(filenames),\n # block_length: each time we read from a file, reads block_length elements from this file\n block_length=1)\n dataset = dataset.map(lambda x: parse_tfrecord(x, res), num_parallel_calls=8)\n dataset = dataset.shuffle(buffer_size=1000, reshuffle_each_iteration=True)\n dataset = dataset.repeat(epochs)\n dataset = dataset.batch(batch_size)\n dataset = dataset.prefetch(buffer_size=None)\n return dataset\n\n\ndef get_dataset(tfrecord_dir, res, batch_size, is_train):\n # get dataset\n filenames = glob.glob(os.path.join(tfrecord_dir, '*.tfrecord'))\n filenames = sorted(filenames)\n train_filenames = filenames[:-1]\n val_filenames = filenames[-1:]\n filenames = train_filenames if is_train else val_filenames\n\n # create dataset\n dataset = input_fn(filenames, res, batch_size=batch_size, epochs=None)\n return dataset\n\n\ndef get_label_depth():\n # define label spec\n label_depths = {\n 'label_dim': 259,\n # 'label_dim': 258,\n # 'person_dim': 4,\n # 'keypoint_dim': 254,\n }\n return label_depths\n\n\ndef main():\n import numpy as np\n from PIL import Image\n\n # res = 256\n res = 512\n data_base_dir = '../data'\n folder_name = 'tfrecords'\n tfrecord_dir = os.path.join(data_base_dir, folder_name)\n\n batch_size = 2\n is_train = True\n\n # label_depths = get_label_depth()\n dataset = get_dataset(tfrecord_dir, res, batch_size, is_train)\n\n for ii in range(3): # 이미지, 라벨 3개만 출력\n for real_images, labels in dataset.take(1):\n image_raw = real_images.numpy()\n image_raw = image_raw[0]\n image_raw = np.transpose(image_raw, axes=[1, 2, 0])\n image_raw = (image_raw + 1.0) * 127.5 # -1~1 -> 0~255\n image = Image.fromarray(np.uint8(image_raw))\n image.show()\n print(labels[0])\n return\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.transpose",
"tensorflow.io.decode_png",
"tensorflow.data.TFRecordDataset",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.reshape",
"tensorflow.io.parse_single_example",
"tensorflow.cast",
"numpy.uint8",
"tensorflow.io.FixedLenFeature",
"tensorflow.image.resize",
"numpy.transpose"
]
] |
zhanghang1989/incubator-mxnet
|
[
"bfa6e28d9c1886de737061c213eb21010f377a23"
] |
[
"tests/python/gpu/test_operator_gpu.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import print_function\nimport sys\nimport os\nimport time\nimport multiprocessing as mp\nimport unittest\nimport mxnet as mx\nimport numpy as np\nimport unittest\nfrom nose.tools import assert_raises\nfrom mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal\nfrom mxnet.base import MXNetError\nfrom mxnet import autograd\nfrom numpy.testing import assert_allclose\n\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.insert(0, os.path.join(curr_path, '../unittest'))\nfrom common import setup_module, with_seed\nfrom test_operator import *\nfrom test_optimizer import *\nfrom test_random import *\nfrom test_gluon import *\nfrom test_loss import *\nfrom test_exc_handling import *\n#from test_rnn import *\nfrom test_gluon_rnn import *\nfrom test_sparse_ndarray import test_create_csr, test_create_row_sparse, test_sparse_nd_slice\nfrom test_sparse_ndarray import test_create_sparse_nd_empty, test_create_sparse_nd_from_sparse\nfrom test_sparse_ndarray import test_create_sparse_nd_from_dense, test_create_sparse_nd_infer_shape\nfrom test_sparse_ndarray import test_sparse_nd_check_format, test_sparse_nd_copy\nfrom test_sparse_ndarray import test_sparse_nd_setitem, test_sparse_nd_binary_scalar_op\nfrom test_sparse_operator import *\nfrom test_ndarray import *\n\nset_default_context(mx.gpu(0))\ndel test_support_vector_machine_l1_svm\ndel test_support_vector_machine_l2_svm\n\n\ndef check_countsketch(in_dim,out_dim,n):\n sym = mx.sym.contrib.count_sketch(name='countsketch',out_dim = out_dim)\n shape = [(n,in_dim), (1,in_dim),(1,in_dim)] #shape of input x, hash h and hash s\n\n arr = [mx.nd.empty(shape[i]) for i in range(3)]\n arr_grad = [mx.nd.empty(shape[i]) for i in range(3)]\n x = np.random.uniform(-10, 10, shape[0])\n arr[0][:] = x #input x\n h = np.random.randint(0, out_dim, shape[1])\n arr[1][:] = h #hash h\n s = np.random.randint(0, 2, shape[2])*2-np.ones(shape[2])\n arr[2][:] = s #hash s\n # forward\n exe_list = [sym.bind(mx.gpu(0), arr, arr_grad)]\n for exe in exe_list:\n exe.forward(is_train= True)\n out1 = [exe.outputs[0].asnumpy() for exe in exe_list]\n\n a = np.zeros((n,out_dim))\n temp = np.multiply(x, s)\n for num_sample in np.arange(0,n):\n for idx in np.arange(0,in_dim):\n a[num_sample][h[0][idx]] += temp[num_sample][idx]\n assert_almost_equal(a,out1[0],rtol=1e-3, atol=1e-12)\n\n # backward\n out_grad = mx.nd.empty((n,out_dim))\n out_grad[:] = np.random.normal(-3, 3, (n,out_dim))\n for exe in exe_list:\n exe.backward([out_grad])\n\n a = np.zeros((n,in_dim))\n for j in np.arange(0,n):\n for i in np.arange(0,in_dim):\n a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]\n assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, 
atol=1e-12)\n\n@with_seed(0)\ndef test_countsketch():\n nrepeat = 2\n minindim = 40\n maxindim = 100\n minoutdim = 5\n maxoutdim = 30\n maxn = 200\n for repeat in range(nrepeat):\n in_dim = np.random.randint(minindim, maxindim)\n out_dim = np.random.randint(minoutdim, maxoutdim)\n n = np.random.randint(1,maxn)\n check_countsketch(in_dim, out_dim, n)\n\n\ndef check_ifft(shape):\n shape_old = shape\n if len(shape) == 2:\n if shape[1]%2 != 0:\n lst = list(shape)\n lst[1] = lst[1]*2\n shape = tuple(lst)\n shape_old = shape\n shape = (shape[0],shape[1]*2)\n if len(shape) == 4:\n if shape[3]%2 != 0:\n lst = list(shape)\n lst[3] = lst[3]*2\n shape = tuple(lst)\n shape_old = shape\n shape = (shape[0],shape[1],shape[2],shape[3]*2)\n sym = mx.sym.contrib.ifft(name='ifft', compute_size = 128)\n init = [np.random.normal(size=shape, scale=1.0)]\n arr_grad = [mx.nd.empty(shape)]\n ctx_list = [{'ctx': mx.gpu(0),'ifft_data': shape, 'type_dict': {'ifft_data': np.float32}}]\n exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]\n\n for exe in exe_list:\n for arr, iarr in zip(exe.arg_arrays, init):\n arr[:] = iarr.astype(arr.dtype)\n # forward\n for exe in exe_list:\n exe.forward(is_train= True)\n out1 = [exe.outputs[0].asnumpy() for exe in exe_list]\n\n if len(shape) == 2:\n init_complex = np.zeros(shape_old,dtype = np.complex64)\n for i in range(0,shape_old[1]):\n init_complex.real[:,i] = init[0][:,2*i]\n init_complex.imag[:,i] = init[0][:,2*i+1]\n a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, out1[0]/shape_old[1],rtol=1e-3, atol=1e-12)\n\n if len(shape) == 4:\n init_complex = np.zeros(shape_old,dtype = np.complex64)\n for i in range(0,shape_old[3]):\n init_complex.real[:,:,:,i] = init[0][:,:,:,2*i]\n init_complex.imag[:,:,:,i] = init[0][:,:,:,2*i+1]\n a = np.fft.ifft(init_complex, n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, out1[0]/shape_old[3],rtol=1e-3, atol=1e-12)\n # backward\n if len(shape) == 2:\n out_grad = mx.nd.empty(shape_old)\n out_grad[:] = np.random.normal(-3, 3, shape_old)\n for exe in exe_list:\n exe.backward([out_grad])\n temp = exe.grad_arrays[0].asnumpy()\n temp = np.zeros(shape_old)\n for i in range(shape_old[1]):\n temp[:,i] = exe.grad_arrays[0].asnumpy()[:,2*i]\n\n a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)\n if len(shape) == 4:\n out_grad = mx.nd.empty(shape_old)\n out_grad[:] = np.random.normal(-3, 3, shape_old)\n for exe in exe_list:\n exe.backward([out_grad])\n temp = exe.grad_arrays[0].asnumpy()\n temp = np.zeros(shape_old)\n for i in range(shape_old[3]):\n temp[:,:,:,i] = exe.grad_arrays[0].asnumpy()[:,:,:,2*i]\n\n a = np.fft.fft(out_grad.asnumpy(), n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, temp, rtol=1e-3, atol=1e-12)\n\n@with_seed(0)\ndef test_ifft():\n nrepeat = 2\n maxdim = 10\n for repeat in range(nrepeat):\n for order in [2,4]:\n shape = tuple(np.random.randint(1, maxdim, size=order))\n check_ifft(shape)\n\n\ndef check_fft(shape):\n sym = mx.sym.contrib.fft(name='fft', compute_size = 128)\n if len(shape) == 2:\n if shape[1]%2 != 0:\n lst = list(shape)\n lst[1] = lst[1]*2\n shape = tuple(lst)\n shape_old = shape\n if len(shape) == 4:\n if shape[3]%2 != 0:\n lst = list(shape)\n lst[3] = lst[3]*2\n shape = tuple(lst)\n shape_old = shape\n init = [np.random.normal(size=shape, scale=1.0)]\n arr_grad = [mx.nd.empty(shape)]\n ctx_list = [{'ctx': mx.gpu(0),'fft_data': shape, 'type_dict': {'fft_data': 
np.float32}}]\n exe_list = [sym.simple_bind(args_grad=arr_grad,**ctx) for ctx in ctx_list]\n\n for exe in exe_list:\n for arr, iarr in zip(exe.arg_arrays, init):\n arr[:] = iarr.astype(arr.dtype)\n #forward\n for exe in exe_list:\n exe.forward(is_train=True)\n out1 = [exe.outputs[0].asnumpy() for exe in exe_list]\n out = np.fft.fft(init, n=None, axis=-1, norm=None)\n if len(shape) == 2:\n out = np.reshape(out,(out.shape[1],out.shape[2]))\n out2 = np.append(out.real, out.imag, axis = 1)\n a = np.zeros(out1[0].shape)\n p = 0\n for i in range(out2.shape[1]//2):\n a[:,p] = out2[:,i]\n a[:,p+1] = out2[:,i+out2.shape[1]//2]\n p = p+2\n\n if len(shape) == 4:\n out = np.reshape(out,(out.shape[1],out.shape[2],out.shape[3],out.shape[4]))\n out2 = np.append(out.real, out.imag, axis = 1)\n a = np.zeros(out1[0].shape)\n for i in range(out1[0].shape[0]):\n for j in range(out1[0].shape[1]):\n p = 0\n for k in range(out2.shape[3]):\n a[i,j,:,p] = out2[i,j,:,k]\n a[i,j,:,p+1] = out2[i,j+out1[0].shape[1],:,k]\n p = p+2\n\n assert_almost_equal(a, out1[0],rtol=1e-3, atol=1e-6)\n\n # backward\n if len(shape) == 2:\n out_grad = mx.nd.empty((shape[0],2*shape[1]))\n out_grad[:] = np.random.normal(-3, 3, (shape[0],2*shape[1]))\n # out_grad_to_complex\n out_grad_complex = np.zeros(shape,dtype = np.complex64)\n for i in range(0,shape[1]):\n out_grad_complex.real[:,i] = out_grad.asnumpy()[:,2*i]\n out_grad_complex.imag[:,i] = out_grad.asnumpy()[:,2*i+1]\n for exe in exe_list:\n exe.backward([out_grad])\n a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[1],rtol=1e-3, atol=1e-8)\n\n if len(shape) == 4:\n out_grad = mx.nd.empty(out1[0].shape)\n out_grad[:] = np.random.normal(-3, 3, out1[0].shape)\n # out_grad_to_complex\n out_grad_complex = np.zeros(shape,dtype = np.complex64)\n for i in range(0,shape[3]):\n out_grad_complex.real[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i]\n out_grad_complex.imag[:,:,:,i] = out_grad.asnumpy()[:,:,:,2*i+1]\n for exe in exe_list:\n exe.backward([out_grad])\n a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)\n assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)\n\n@with_seed(0)\ndef test_fft():\n nrepeat = 2\n maxdim = 10\n for repeat in range(nrepeat):\n for order in [2,4]:\n shape = tuple(np.random.randint(1, maxdim, size=order))\n check_fft(shape)\n\n\n@with_seed()\ndef test_batchnorm_with_type():\n ctx_list_v1_2D = [\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},\n ]\n\n ctx_list_v2_2D = [\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},\n {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float64}},\n ]\n\n ctx_list_v2_1D = [\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},\n {'ctx': 
mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (10, 2, 10), 'type_dict': {'norm_data': np.float64}},\n ]\n\n ctx_list_v2_3D = [\n {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.cpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}},\n {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float16}},\n {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float32}},\n {'ctx': mx.gpu(0), 'norm_data': (4, 2, 3, 5, 5), 'type_dict': {'norm_data': np.float64}}\n ]\n\n # V1, 2D\n sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=False)\n check_consistency(sym, ctx_list_v1_2D)\n sym = mx.sym.BatchNorm_v1(name='norm', fix_gamma=True)\n check_consistency(sym, ctx_list_v1_2D)\n\n\n # V2, 2D\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_2D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_2D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_2D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_2D)\n\n # V2, 1D\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_1D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_1D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_1D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_1D)\n #\n # # V2, 3D\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=False, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_3D)\n sym = mx.sym.BatchNorm(name='norm', fix_gamma=True, cudnn_off=True)\n check_consistency(sym, ctx_list_v2_3D)\n\n\n@with_seed()\ndef test_batchnorm_versions():\n def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_global_stats):\n ctx_list = []\n sym_list = []\n # BatchNormV1 cpu\n if 'batchnorm_v1_cpu' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm'))\n\n # BatchNormV1 gpu (organic)\n if 'batchnorm_v1_gpu' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm_v1(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm'))\n\n # BatchNorm cpu\n if 'batchnorm_cpu' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.cpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm'))\n\n # BatchNorm gpu (organic)\n if 'batchnorm_gpu' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm', 
cudnn_off=True))\n\n # BatchNorm gpu cudnn (if cudnn is enabled)\n if 'batchnorm_cudnn' in batchnorm_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'batchnorm_data': data, 'type_dict': {'batchnorm_data': np.float32}})\n sym_list.append(mx.sym.BatchNorm(fix_gamma=fix_gamma,\n use_global_stats=use_global_stats,\n name='batchnorm', cudnn_off=False))\n\n check_consistency(sym_list, ctx_list)\n\n def test_1d_batchnorm(fix_gamma, use_global_stats):\n data = (2, 3, 20)\n test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',\n 'batchnorm_gpu', 'batchnorm_cudnn'],\n data=data,\n fix_gamma=fix_gamma, use_global_stats=use_global_stats)\n\n def test_2d_batchnorm(fix_gamma, use_global_stats):\n data = (2, 3, 10, 10)\n test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',\n 'batchnorm_cpu',\n 'batchnorm_gpu', 'batchnorm_cudnn'],\n data=data,\n fix_gamma=fix_gamma, use_global_stats=use_global_stats)\n\n def test_3d_batchnorm(fix_gamma, use_global_stats):\n data = (2, 3, 3, 5, 5)\n test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',\n 'batchnorm_gpu'],\n data=data,\n fix_gamma=fix_gamma, use_global_stats=use_global_stats)\n\n test_1d_batchnorm(True, False)\n test_1d_batchnorm(False, False)\n test_1d_batchnorm(False, True)\n test_1d_batchnorm(True, True)\n\n test_2d_batchnorm(True, False)\n test_2d_batchnorm(False, False)\n test_2d_batchnorm(False, True)\n test_2d_batchnorm(True, True)\n\n test_3d_batchnorm(True, False)\n test_3d_batchnorm(False, False)\n test_3d_batchnorm(False, True)\n test_3d_batchnorm(True, True)\n\n\n@with_seed(1234)\ndef test_convolution_with_type():\n sym1 = mx.sym.Convolution(num_filter=3, kernel=(3,3), name='conv')\n\n data = mx.sym.Variable('conv_data')\n w = mx.sym.Variable('conv_weight')\n b = mx.sym.Variable('conv_bias')\n w = mx.sym.transpose(w, axes=(0,2,3,1))\n sym2 = mx.sym.transpose(data, axes=(0,2,3,1))\n sym2 = mx.sym.Convolution(sym2, w, b, layout='NHWC', num_filter=3, kernel=(3,3))\n sym2 = mx.sym.transpose(sym2, axes=(0,3,1,2), name='conv')\n\n sym = [sym1, sym1, sym1, sym1, sym1, sym2, sym2]\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float16}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 10, 10), 'type_dict': {'conv_data': np.float32}},\n # NHWC\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),\n 'type_dict': {'conv_data': np.float32, 'conv_weight': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 10, 10), 'conv_weight': (3, 2, 3, 3),\n 'type_dict': {'conv_data': np.float16, 'conv_weight': np.float16}}\n ]\n # wider tolerance needed for true-fp16 NCHW test above\n tol = {np.dtype(np.float16): 0.5,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, tol=tol)\n # test ability to turn off training on bias\n check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)\n\n\n# Apply N symbols against each of M contexts, checking that all NxM combinations match.\ndef check_consistency_NxM(sym_list, ctx_list):\n # e.g. 
if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:\n # sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]\n check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list))\n\n@with_seed()\ndef test_convolution_options():\n # 1D convolution\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float16}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # 1x1 convolution\n sym = mx.sym.Convolution(layout='NCW', num_filter=3, kernel=(1,), pad=(0,), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,), pad=(0,), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n # 2D convolution\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float16}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), stride=(2,2), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # 1x1 convolution\n sym = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1), pad=(0,0), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n # 3D convolution\n ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': 
np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # 1x1 convolution\n sym = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), name='conv')\n sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n\n@with_seed()\ndef test_convolution_versions():\n # 2D convolution NCHW\n ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n conv_v1_cpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n conv_v1_gpu = mx.sym.Convolution_v1(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')\n conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), name='conv')\n conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(3,3), pad=(1,1), cudnn_off=True, name='conv')\n syms = [conv_v1_cpu, conv_v1_gpu, conv_cudnn, conv_cpu, conv_gpu]\n check_consistency(syms, ctx_list)\n\n # 3D convolution NCDHW\n ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}},\n {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n conv_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n conv_cpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n conv_gpu = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')\n syms = [conv_cudnn, conv_cpu, conv_gpu]\n check_consistency(syms, ctx_list)\n\n\n@with_seed()\ndef test_pooling_with_type():\n ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},\n {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},\n {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},\n {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},\n {'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]\n sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='valid', name='pool')\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.Pooling(kernel=(3,3), pool_type='max', pooling_convention='full', name='pool')\n check_consistency(sym, ctx_list)\n\n sym = 
mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_deconvolution_with_type():\n # Test basic deconvolution without exercising stride, pad or dilation.\n # 1D deconvolution\n sym = mx.sym.Deconvolution(num_filter=3, kernel=(3,), name='deconv')\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]\n # wider tolerance needed for true-fp16 test above\n tol = {np.dtype(np.float16): 0.3,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, tol=tol)\n check_consistency(sym, ctx_list, tol=tol, grad_req=\"add\")\n\n # 2D deconvolution\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]\n # wider tolerance needed for true-fp16 test above\n tol = {np.dtype(np.float16): 0.3,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, tol=tol)\n check_consistency(sym, ctx_list, tol=tol, grad_req=\"add\")\n\n\n@with_seed()\ndef test_deconvolution_options():\n\n # 1D deconvolution\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 7), 'type_dict': {'deconv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), pad=(1,), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), pad=(1,), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), stride=(2,), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), stride=(2,), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Deconvolution(layout='NCW', num_filter=3, kernel=(3,), dilate=(2,), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=3, kernel=(3,), dilate=(2,), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n # 2D deconvolution\n ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': 
{'deconv_data': np.float32}},\n {'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float16}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},\n {'ctx': mx.cpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float32}}]\n # Pad > 0\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), pad=(1,1), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Stride > 1\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), stride=(2,2), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n # Dilate > 1\n sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), name='deconv')\n sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')\n check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n# # 3D deconvolution (not yet enabled)\n# ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n# {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},\n# {'ctx': mx.gpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float32}}]\n# # Pad > 0\n# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), name='conv')\n# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), pad=(1,1,1), cudnn_off=True, name='conv')\n# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n# # Stride > 1\n# sym = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), name='conv')\n# sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(2,3,3), stride=(2,2,2), cudnn_off=True, name='conv')\n# check_consistency_NxM([sym, sym_no_cudnn], ctx_list)\n\n\n@with_seed(1234)\ndef test_bilinear_sampler_with_type():\n data = mx.sym.Variable('data')\n grid = mx.sym.Variable('grid')\n sym = mx.sym.BilinearSampler(data=data, grid=grid)\n ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float64}},\n {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float32}},\n {'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float16}},\n {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float64}},\n {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'grid': (1, 2, 10, 10),\n 'type_dict': {'data': np.float32}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n\n\n@with_seed()\ndef test_grid_generator_with_type():\n data = mx.sym.Variable('data')\n sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))\n ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}},\n {'ctx': mx.cpu(0), 'data': (3, 6), 'type_dict': {'data': np.float32}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n sym = mx.sym.GridGenerator(data=data, transform_type='warp', target_shape=(20, 20))\n ctx_list = [{'ctx': mx.gpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': np.float32}},\n {'ctx': mx.cpu(0), 'data': (3, 2, 20, 20), 'type_dict': {'data': 
np.float32}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n\n\[email protected](\"test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645\")\n@with_seed(1234)\ndef test_spatial_transformer_with_type():\n data = mx.sym.Variable('data')\n loc = mx.sym.Flatten(data)\n loc = mx.sym.FullyConnected(data=loc, num_hidden=10)\n loc = mx.sym.Activation(data=loc, act_type='relu')\n loc = mx.sym.FullyConnected(data=loc, num_hidden=6)\n sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10),\n transform_type=\"affine\", sampler_type=\"bilinear\")\n ctx_list = [{'ctx': mx.gpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}},\n {'ctx': mx.cpu(0), 'data': (1, 5, 10, 10), 'type_dict': {'data': np.float32}}]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req=\"add\")\n\n\n# Checking max pooling consistency over the data sets of different float types is problematic\n# as one max value in a float32 data set may not be the max value in a float16 data set.\n# This function will not be called.\n@with_seed(1234)\ndef test_pooling_with_type():\n ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},\n {'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},\n {'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float16}},\n {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},\n {'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict': {'pool_data': np.float32}}]\n\n sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2), pool_type='max')\n check_consistency(sym, ctx_list)\n\n sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')\n check_consistency(sym, ctx_list)\n\n # this is unstable\n # sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')\n # check_consistency(sym, ctx_list)\n\n sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_pooling_versions():\n def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, stride,\n pooling_convention='valid', global_pool=False):\n ctx_list = []\n sym_list = []\n # PoolingV1 cpu\n if 'pool_v1_cpu' in pool_op_list:\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n if not global_pool:\n sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, name='pool'))\n else:\n sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))\n # PoolingV1 gpu\n if 'pool_v1_gpu' in pool_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n if not global_pool:\n sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, name='pool'))\n else:\n sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))\n # Pooling cpu\n if 'pool_cpu' in pool_op_list:\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n if not global_pool:\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n 
pooling_convention=pooling_convention, name='pool'))\n else:\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))\n # Pooling gpu\n if 'pool_gpu' in pool_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n if not global_pool:\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, cudnn_off=True, name='pool'))\n else:\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,\n name='pool'))\n # CuDNNPooling\n if 'pool_cudnn' in pool_op_list:\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n if not global_pool:\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, cudnn_off=False, name='pool'))\n else:\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=False,\n name='pool'))\n check_consistency(sym_list, ctx_list)\n\n def test_1d_pooling(pool_type):\n data = (2, 3, 20)\n kernel = (4,)\n pad = (0,)\n stride = (1,)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='valid', global_pool=False)\n\n pad = (2,)\n stride = (2,)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='valid', global_pool=False)\n\n pad = (0,)\n stride = (1,)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='full', global_pool=False)\n\n pad = (2,)\n stride = (2,)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='full', global_pool=False)\n\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n global_pool=True)\n\n def test_2d_pooling(pool_type):\n data = (2, 3, 20, 20)\n kernel = (4, 5)\n pad = (0, 0)\n stride = (1, 1)\n test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='valid', global_pool=False)\n\n # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here\n pad = (2, 3)\n stride = (2, 3)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='valid', global_pool=False)\n\n pad = (0, 0)\n stride = (1, 1)\n test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='full', global_pool=False)\n\n # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here\n pad = (2, 3)\n stride = (2, 3)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='full', global_pool=False)\n\n test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 
'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n global_pool=True)\n\n def test_3d_pooling(pool_type):\n data = (2, 3, 20, 20, 20)\n kernel = (4, 5, 3)\n pad = (0, 0, 0)\n stride = (1, 1, 1)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='valid', global_pool=False)\n\n pad = (2, 3, 3)\n stride = (2, 3, 1)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='valid', global_pool=False)\n\n pad = (0, 0, 0)\n stride = (1, 1, 1)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='full', global_pool=False)\n\n pad = (2, 3, 3)\n stride = (2, 3, 1)\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention='full', global_pool=False)\n\n test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],\n data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n global_pool=True)\n\n test_1d_pooling('max')\n test_1d_pooling('avg')\n test_1d_pooling('sum')\n\n test_2d_pooling('max')\n test_2d_pooling('avg')\n test_2d_pooling('sum')\n\n test_3d_pooling('max')\n test_3d_pooling('avg')\n test_3d_pooling('sum')\n\n\n@with_seed()\ndef test_global_pooling():\n def test_1d_pooling(pool_type):\n data = (2, 3, 20)\n kernel = (4,)\n pad = (2,)\n stride = (2,)\n\n ctx_list = []\n sym_list = []\n\n pooling_convention = 'valid'\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n 
pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))\n\n check_consistency(sym_list, ctx_list)\n\n def test_2d_pooling(pool_type):\n data = (2, 3, 20, 20)\n kernel = (4, 4)\n pad = (2, 2)\n stride = (2, 2)\n\n ctx_list = []\n sym_list = []\n\n pooling_convention = 'valid'\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling_v1(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling_v1(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=False, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type,\n pooling_convention=pooling_convention, 
global_pool=True, cudnn_off=True, name='pool'))\n\n ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})\n sym_list.append(mx.sym.Pooling(pool_type=pool_type,\n pooling_convention=pooling_convention, global_pool=True, cudnn_off=True, name='pool'))\n\n\n check_consistency(sym_list, ctx_list)\n\n test_1d_pooling('max')\n test_1d_pooling('avg')\n test_1d_pooling('sum')\n\n test_2d_pooling('max')\n test_2d_pooling('avg')\n test_2d_pooling('sum')\n\n\n@with_seed()\ndef test_upsampling_with_type():\n sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='nearest', num_args=1)\n ctx_list = [{'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},\n {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}},\n {'ctx': mx.gpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float16}},\n {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float64}},\n {'ctx': mx.cpu(0), 'up_arg0': (2, 2, 2, 10), 'type_dict': {'up_arg0': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_upsampling_bilinear_with_type():\n sym = mx.sym.UpSampling(scale=2, num_filter=2, name='up', sample_type='bilinear', num_args=1)\n ctx_list = [{'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},\n {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}},\n {'ctx': mx.gpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float16}},\n {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float64}},\n {'ctx': mx.cpu(0), 'up_data': (2, 2, 2, 10), 'type_dict': {'up_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_concat_with_type():\n sym = mx.sym.Concat(name='concat', num_args=2)\n ctx_list = [{'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}},\n {'ctx': mx.gpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float16, 'concat_arg1': np.float16}},\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float64, 'concat_arg1': np.float64}},\n {'ctx': mx.cpu(0), 'concat_arg1': (2, 10), 'concat_arg0': (2, 10),\n 'type_dict': {'concat_arg0': np.float32, 'concat_arg1': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_elementwisesum_with_type():\n dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],\n [mx.cpu(0), [np.float64, np.float32]] ]\n for num_args in range(1, 6):\n ews_arg_shape = {}\n for i in range(num_args):\n ews_arg_shape['ews_arg'+str(i)] = (2, 10)\n sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)\n ctx_list = []\n for dev, types in dev_types:\n for dtype in types:\n ews_arg_dtype = {'type_dict':{}}\n for i in range(num_args):\n ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype\n ctx_elem = {'ctx': dev}\n ctx_elem.update(ews_arg_shape)\n ctx_elem.update(ews_arg_dtype)\n ctx_list.append(ctx_elem)\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_reshape_with_type():\n sym = mx.sym.Reshape(name='reshape', shape=(-1,1,1,0))\n ctx_list = [{'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},\n {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 
'type_dict': {'reshape_data': np.float32}},\n {'ctx': mx.gpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float16}},\n {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float64}},\n {'ctx': mx.cpu(0), 'reshape_data': (2, 2, 2, 10), 'type_dict': {'reshape_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_blockgrad_with_type():\n sym = mx.sym.BlockGrad(name='bg')\n ctx_list = [{'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},\n {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}},\n {'ctx': mx.gpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float16}},\n {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float64}},\n {'ctx': mx.cpu(0), 'bg_data': (2, 2, 2, 10), 'type_dict': {'bg_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_swapaxis_with_type():\n sym = mx.sym.SwapAxis(name='swap', dim1=1)\n ctx_list = [{'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},\n {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}},\n {'ctx': mx.gpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float16}},\n {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float64}},\n {'ctx': mx.cpu(0), 'swap_data': (2, 2, 2, 10), 'type_dict': {'swap_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_fullyconnected_with_type():\n sym = mx.sym.FullyConnected(num_hidden=3, name='inner')\n ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},\n {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}},\n {'ctx': mx.gpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float16}},\n {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float64}},\n {'ctx': mx.cpu(0), 'inner_data': (2, 10), 'type_dict': {'inner_data': np.float32}}]\n check_consistency(sym, ctx_list)\n # Sizes are divisible by 8 to test TensorCore on Volta GPU.\n sym = mx.sym.FullyConnected(num_hidden=8, name='inner')\n ctx_list = [{'ctx': mx.gpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float16}},\n {'ctx': mx.cpu(0), 'inner_data': (16, 24), 'type_dict': {'inner_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_activation_with_type():\n sym = mx.sym.Activation(name='act', act_type='sigmoid')\n ctx_list = [{'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},\n {'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},\n {'ctx': mx.gpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float16}},\n {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float64}},\n {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float32}},\n {'ctx': mx.cpu(0), 'act_data': (2, 2, 10, 10), 'type_dict': {'act_data': np.float16}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_lrn():\n sym = mx.sym.LRN(alpha=0.0001, beta=0.75, knorm=2, nsize=5, name='lrn')\n ctx_list = [{'ctx': mx.gpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}},\n {'ctx': mx.cpu(0), 'lrn_data': (2, 6, 10, 10), 'type_dict': {'lrn_data': np.float32}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_embedding_with_type():\n def 
test_embedding_helper(data_types, weight_types, low_pad, high_pad):\n NVD = [[20, 10, 20], [200, 10, 300]]\n for N, V, D in NVD:\n sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)\n ctx_list = []\n for data_type in data_types:\n for weight_type in weight_types:\n ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),\n 'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})\n ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),\n 'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})\n arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}\n check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},\n arg_params=arg_params)\n\n data_types = [np.float16, np.float32, np.float64, np.int32]\n weight_types = [np.float16, np.float32, np.float64]\n test_embedding_helper(data_types, weight_types, 5, 5)\n data_types = [np.uint8]\n weight_types = [np.float16, np.float32, np.float64]\n test_embedding_helper(data_types, weight_types, 0, 5)\n\n\[email protected](\"test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/8288\")\n@with_seed()\ndef test_svmoutput_with_type():\n sym = mx.sym.SVMOutput(name='svmoutput', use_linear=True)\n ctx_list = [{'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},\n {'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},\n {'ctx': mx.gpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}},\n {'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float64}},\n {'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float32}},\n {'ctx': mx.cpu(0), 'svmoutput_data': (20, 10), 'type_dict': {'svmoutput_data': np.float16}}]\n check_consistency(sym, ctx_list)\n\n\n@with_seed()\ndef test_take_with_type():\n sym = mx.sym.take(name='take')\n for data_ndim in range(2, 5):\n for idx_ndim in range(1, 4):\n data_shape = ()\n for _ in range(data_ndim):\n data_shape += (np.random.randint(low=3, high=6), )\n idx_shape = ()\n for _ in range(idx_ndim):\n idx_shape += (np.random.randint(low=3, high=5), )\n ctx_list = [{'ctx': mx.gpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float64,\n 'take_a': np.float64}},\n {'ctx': mx.gpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float32,\n 'take_a': np.float32}},\n {'ctx': mx.gpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float16,\n 'take_a': np.float16}},\n {'ctx': mx.cpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float64,\n 'take_a': np.float64}},\n {'ctx': mx.cpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float32,\n 'take_a': np.float32}},\n {'ctx': mx.cpu(0), 'take_indices': idx_shape,\n 'take_a': data_shape,\n 'type_dict': {'take_indices': np.float16,\n 'take_a': np.float16}}]\n arg_params = {'take_indices': np.random.randint(low=0,\n high=data_shape[0],\n size=idx_shape),\n 'take_a': np.random.normal(size=data_shape)}\n check_consistency(sym, ctx_list,\n grad_req={'take_indices': 'null',\n 'take_a': 'write'},\n arg_params=arg_params)\n\n\ndef check_rnn_consistency(cell1, cell2):\n dshape = (32, 5, 200)\n data = mx.sym.Variable('data')\n\n 
sym1, _ = cell1.unroll(5, data, merge_outputs=True)\n mod1 = mx.mod.Module(sym1, label_names=None, context=mx.gpu(0))\n mod1.bind(data_shapes=[('data', dshape)], label_shapes=None)\n\n sym2, _ = cell2.unroll(5, data, merge_outputs=True)\n mod2 = mx.mod.Module(sym2, label_names=None, context=mx.gpu(0))\n mod2.bind(data_shapes=[('data', dshape)], label_shapes=None)\n\n mod1.init_params()\n args, auxs = mod1.get_params()\n args = cell1.unpack_weights(args)\n args = cell2.pack_weights(args)\n mod2.set_params(args, auxs)\n\n batch=mx.io.DataBatch(data=[mx.random.uniform(shape=dshape)], label=[])\n mod1.forward(batch, is_train=False)\n mod2.forward(batch, is_train=False)\n\n assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=1e-2, atol=1e-4)\n\n@with_seed()\ndef test_rnn():\n fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='rnn_relu', prefix='')\n\n stack = mx.rnn.SequentialRNNCell()\n stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l0_'))\n stack.add(mx.rnn.RNNCell(100, activation='relu', prefix='l1_'))\n\n check_rnn_consistency(fused, stack)\n check_rnn_consistency(stack, fused)\n\n\n@with_seed()\ndef test_lstm():\n fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='lstm', prefix='')\n\n stack = mx.rnn.SequentialRNNCell()\n stack.add(mx.rnn.LSTMCell(100, prefix='l0_'))\n stack.add(mx.rnn.LSTMCell(100, prefix='l1_'))\n\n check_rnn_consistency(fused, stack)\n check_rnn_consistency(stack, fused)\n\n\n@with_seed()\ndef test_lstm_forget_bias():\n forget_bias = 2.0\n fused = mx.rnn.FusedRNNCell(10, forget_bias=forget_bias, num_layers=2, mode='lstm', prefix='')\n\n dshape = (32, 1, 20)\n data = mx.sym.Variable('data')\n\n sym, _ = fused.unroll(1, data, merge_outputs=True)\n mod = mx.mod.Module(sym, label_names=None, context=mx.gpu(0))\n mod.bind(data_shapes=[('data', dshape)], label_shapes=None)\n\n mod.init_params()\n\n args, auxs = mod.get_params()\n args = fused.unpack_weights(args)\n\n bias_name = next(x for x in args if x.endswith('f_bias'))\n expected_bias = forget_bias * np.ones(10, )\n assert_allclose(args[bias_name].asnumpy(), expected_bias)\n\n\n@with_seed()\ndef test_gru():\n fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='')\n\n stack = mx.rnn.SequentialRNNCell()\n stack.add(mx.rnn.GRUCell(100, prefix='l0_'))\n stack.add(mx.rnn.GRUCell(100, prefix='l1_'))\n\n check_rnn_consistency(fused, stack)\n check_rnn_consistency(stack, fused)\n\n\n@with_seed()\ndef test_bidirectional():\n fused = mx.rnn.FusedRNNCell(100, num_layers=2, mode='gru', prefix='',\n bidirectional=True)\n\n stack = mx.rnn.SequentialRNNCell()\n stack.add(mx.rnn.BidirectionalCell(\n mx.rnn.GRUCell(100, prefix='l0_'),\n mx.rnn.GRUCell(100, prefix='r0_'),\n output_prefix='bi_gru_0_'))\n stack.add(mx.rnn.BidirectionalCell(\n mx.rnn.GRUCell(100, prefix='l1_'),\n mx.rnn.GRUCell(100, prefix='r1_'),\n output_prefix='bi_gru_1_'))\n\n check_rnn_consistency(fused, stack)\n check_rnn_consistency(stack, fused)\n\n\n@with_seed()\ndef test_unfuse():\n for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:\n fused = mx.rnn.FusedRNNCell(\n 100, num_layers=2, mode=mode,\n prefix='test_%s'%mode,\n bidirectional=True,\n dropout=0.5)\n\n stack = fused.unfuse()\n\n check_rnn_consistency(fused, stack)\n check_rnn_consistency(stack, fused)\n\n\n@with_seed(1234)\ndef test_psroipooling_with_type():\n arg_params = {\n 'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}\n\n # plain psroipooling\n sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, 
output_dim=2, pooled_size=3, name='psroipool')\n ctx_list = [{'ctx': mx.gpu(0),\n 'psroipool_data': (1, 18, 14, 14),\n 'psroipool_rois': (2, 5),\n 'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},\n {'ctx': mx.gpu(0),\n 'psroipool_data': (1, 18, 14, 14),\n 'psroipool_rois': (2, 5),\n 'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},\n {'ctx': mx.gpu(0),\n 'psroipool_data': (1, 18, 14, 14),\n 'psroipool_rois': (2, 5),\n 'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},\n ]\n\n check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',\n 'psroipool_rois': 'null'}, arg_params=arg_params)\n\n\n@with_seed(1234)\ndef test_deformable_psroipooling_with_type():\n arg_params = {\n 'deformable_psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}\n\n # deformable psroipooling\n sym = mx.sym.contrib.DeformablePSROIPooling(spatial_scale=0.0625, sample_per_part=4, group_size=3, pooled_size=3,\n output_dim=2, trans_std=0.1, no_trans=False, name='deformable_psroipool')\n\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float64, 'deformable_psroipool_rois': np.float64,\n 'deformable_psroipool_trans': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float32, 'deformable_psroipool_rois': np.float32,\n 'deformable_psroipool_trans': np.float32}},\n {'ctx': mx.gpu(0),\n 'deformable_psroipool_data': (1, 18, 14, 14),\n 'deformable_psroipool_rois': (2, 5),\n 'deformable_psroipool_trans': (2, 4, 3, 3),\n 'type_dict': {'deformable_psroipool_data': np.float16, 'deformable_psroipool_rois': np.float16,\n 'deformable_psroipool_trans': np.float16}},\n ]\n\n check_consistency(sym, ctx_list, grad_req={'deformable_psroipool_data': 'write',\n 'deformable_psroipool_rois': 'null',\n 'deformable_psroipool_trans': 'write'}, arg_params=arg_params)\n\n\n@with_seed(1234)\ndef test_deformable_convolution_with_type():\n sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 10, 10),\n 'deformable_conv_offset': (2, 18, 8, 8),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 10, 10),\n 'deformable_conv_offset': (2, 18, 8, 8),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n # {'ctx': mx.gpu(0),\n # 'deformable_conv_data': (2, 2, 10, 10),\n # 'deformable_conv_offset': (2, 18, 8, 8),\n # 'type_dict': {'deformable_conv_data': np.float16, 'deformable_conv_offset': np.float16}},\n ]\n # wider tolerance needed for true-fp16 NCHW test above\n tol = {np.dtype(np.float16): 0.5,\n np.dtype(np.float32): 1e-3,\n np.dtype(np.float64): 1e-5,\n np.dtype(np.uint8): 0,\n np.dtype(np.int32): 0}\n check_consistency(sym, ctx_list, tol=tol)\n # test ability to turn off training on bias\n check_consistency(sym, ctx_list, grad_req={'deformable_conv_data': 'write',\n 'deformable_conv_offset': 'write',\n 'deformable_conv_weight': 'write',\n 'deformable_conv_bias': 'null'}, 
tol=tol)\n\n\n@with_seed()\ndef test_deformable_convolution_options():\n # 2D convolution\n\n # Pad > 0\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 7, 7),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 7, 7),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n # {'ctx': mx.gpu(0),\n # 'deformable_conv_data': (2, 2, 7, 7),\n # 'deformable_offset': (2, 18, 7, 7),\n # 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},\n ]\n sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), pad=(1,1), name='deformable_conv')\n check_consistency(sym, ctx_list)\n\n # Stride > 1\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n # {'ctx': mx.gpu(0),\n # 'deformable_conv_data': (2, 2, 7, 7),\n # 'deformable_conv_offset': (2, 18, 3, 3),\n # 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},\n ]\n sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), stride=(2,2), name='deformable_conv')\n check_consistency(sym, ctx_list)\n\n # Dilate > 1\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 18, 3, 3),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n # {'ctx': mx.gpu(0),\n # 'deformable_conv_data': (2, 2, 7, 7),\n # 'deformable_conv_offset': (2, 18, 3, 3),\n # 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},\n ]\n sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), dilate=(2,2), name='deformable_conv')\n check_consistency(sym, ctx_list)\n\n # Deformable group > 1\n # since atomicAdd does not support fp16 (which deformable conv uses in backward), we do not test fp16 here\n ctx_list = [{'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 36, 5, 5),\n 'type_dict': {'deformable_conv_data': np.float64, 'deformable_conv_offset': np.float64}},\n {'ctx': mx.gpu(0),\n 'deformable_conv_data': (2, 2, 7, 7),\n 'deformable_conv_offset': (2, 36, 5, 5),\n 'type_dict': {'deformable_conv_data': np.float32, 'deformable_conv_offset': np.float32}},\n # {'ctx': mx.gpu(0),\n # 'deformable_conv_data': (2, 2, 7, 7),\n # 'deformable_conv_offset': (2, 36, 5, 5),\n # 'type_dict': {'deformable_conv_data': np.float16, 'deformable_offset': np.float16}},\n ]\n sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), 
num_deformable_group=2,\n name='deformable_conv')\n\n\n@with_seed()\ndef test_residual_fused():\n cell = mx.rnn.ResidualCell(\n mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',\n prefix='rnn_', dropout=0.5))\n\n inputs = [mx.sym.Variable('rnn_t%d_data'%i) for i in range(2)]\n outputs, _ = cell.unroll(2, inputs, merge_outputs=None)\n assert sorted(cell.params._params.keys()) == \\\n ['rnn_parameters']\n\n args, outs, auxs = outputs.infer_shape(rnn_t0_data=(10, 50), rnn_t1_data=(10, 50))\n assert outs == [(10, 2, 50)]\n outputs = outputs.eval(ctx=mx.gpu(0),\n rnn_t0_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,\n rnn_t1_data=mx.nd.ones((10, 50), ctx=mx.gpu(0))+5,\n rnn_parameters=mx.nd.zeros((61200,), ctx=mx.gpu(0)))\n expected_outputs = np.ones((10, 2, 50))+5\n assert np.array_equal(outputs[0].asnumpy(), expected_outputs)\n\n\ndef check_rnn_layer(layer):\n layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n with mx.gpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n # atol of 1e-6 required, as exposed by seed 2124685726\n assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)\n\ndef check_rnn_layer_w_rand_inputs(layer):\n layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n x = mx.nd.uniform(shape=(10, 16, 30))\n with mx.gpu(0):\n x = x.copyto(mx.gpu(0))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = x.copyto(mx.cpu(0))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)\n\n@with_seed()\ndef test_rnn_layer():\n check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))\n check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))\n check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))\n check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))\n\n check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))\n check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))\n\n@with_seed()\ndef test_sequence_reverse():\n check_sequence_reverse(mx.gpu(0))\n\n\[email protected](\"Test fails intermittently. Temporarily disabled until fixed. 
Tracked at https://github.com/apache/incubator-mxnet/issues/8211\")\n@with_seed()\ndef test_autograd_save_memory():\n x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))\n x.attach_grad()\n\n with mx.autograd.record():\n for i in range(200):\n x = x + 1\n x.wait_to_read()\n x.backward()\n\n\n@with_seed()\ndef test_gluon_ctc_consistency():\n loss = mx.gluon.loss.CTCLoss()\n data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)\n cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))\n gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))\n\n cpu_data = data.copy().as_in_context(mx.cpu(0))\n cpu_data.attach_grad()\n with mx.autograd.record():\n l_cpu = loss(cpu_data, cpu_label)\n l_cpu.backward()\n\n gpu_data = data.copyto(mx.gpu(0))\n gpu_data.attach_grad()\n with mx.autograd.record():\n l_gpu = loss(gpu_data, gpu_label)\n l_gpu.backward()\n\n assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)\n\n\n@with_seed()\ndef test_cuda_rtc():\n source = r'''\n extern \"C\" __global__ void axpy(const float *x, float *y, float alpha) {\n int i = threadIdx.x + blockIdx.x * blockDim.x;\n y[i] += alpha * x[i];\n }\n\n extern \"C\" __global__ void saxpy(const float *x, float *y, float alpha) {\n extern __shared__ float smem[];\n int i = threadIdx.x + blockIdx.x * blockDim.x;\n smem[threadIdx.x] = x[i];\n y[i] += alpha * smem[threadIdx.x];\n }\n '''\n module = mx.rtc.CudaModule(source)\n axpy = module.get_kernel(\"axpy\", \"const float *x, float *y, float alpha\")\n x = mx.nd.ones((10,), ctx=mx.gpu(0))\n y = mx.nd.zeros((10,), ctx=mx.gpu(0))\n axpy.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))\n assert (y.asnumpy() == 3).all()\n\n saxpy = module.get_kernel(\"saxpy\", \"const float *x, float *y, float alpha\")\n saxpy.launch([x, y, 4.0], mx.gpu(0), (1, 1, 1), (10, 1, 1), 10)\n assert (y.asnumpy() == 7).all()\n\n saxpy.launch([x, y, 5.0], mx.gpu(0), (2, 1, 1), (5, 1, 1), 5)\n assert (y.asnumpy() == 12).all()\n\n\n@with_seed()\ndef test_global_norm_clip_multi_device():\n x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))\n x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))\n norm = gluon.utils.clip_global_norm([x1, x2], 1.0)\n assert norm == 5.0\n assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)\n assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)\n\n\n@with_seed()\ndef test_cross_device_autograd():\n x = mx.nd.random.uniform(shape=(10,))\n x.attach_grad()\n\n with mx.autograd.record():\n y = mx.nd.tanh(x)\n y = y.copyto(mx.gpu(0))\n y = mx.nd.tanh(y)\n y = y.copyto(mx.cpu(0))\n y = mx.nd.tanh(y)\n y = y.copyto(mx.gpu(0))\n y = y.copyto(mx.gpu(0))\n\n y.backward()\n\n dx = x.grad.asnumpy()\n x.grad[:] = 0\n\n with mx.autograd.record():\n y = x\n for i in range(3):\n y = mx.nd.tanh(y)\n y.backward()\n\n assert_almost_equal(dx, x.grad.asnumpy())\n\[email protected](\"JIRA issue: https://issues.apache.org/jira/projects/MXNET/issues/MXNET-130\")\n@with_seed()\ndef test_multi_proposal_op():\n # paramters\n feature_stride = 16\n scales = (8, 16, 32)\n ratios = (0.5, 1, 2)\n rpn_pre_nms_top_n = 12000\n rpn_post_nms_top_n = 2000\n threshold = 0.7\n rpn_min_size = feature_stride\n\n feat_len = (1000 + 15) // 16\n H, W = feat_len, feat_len\n num_anchors = len(scales) * len(ratios)\n count_anchors = H * W * num_anchors\n\n def get_new_data(batch_size, ctx):\n '''\n cls_prob: (batch_size, 2 * num_anchors, H, W)\n bbox_pred: (batch_size, 4 * num_anchors, H, W)\n im_info: (batch_size, 3)\n '''\n\n dtype = np.float32\n cls_prob = 
mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = dtype, ctx = ctx)\n bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = dtype, ctx = ctx)\n im_info = mx.nd.empty((batch_size, 3), dtype = dtype, ctx = ctx)\n\n cls = [1.0 * (i + 1) / cls_prob.size for i in range(cls_prob.size)]\n np.random.shuffle(cls)\n cls_prob = mx.nd.reshape(mx.nd.array(cls, dtype = dtype, ctx = ctx), shape = cls_prob.shape)\n bbox_pred = mx.nd.array(np.random.randint(-2, 3, size = bbox_pred.shape), dtype = dtype, ctx = ctx)\n\n for i in range(batch_size):\n im_size = np.random.randint(600, feat_len * feature_stride, size = (2,))\n im_scale = np.random.randint(80, 100) / 100.0\n im_info[i, :] = [im_size[0], im_size[1], im_scale]\n return cls_prob, bbox_pred, im_info\n\n def check_proposal_consistency(op, batch_size):\n '''\n op is mx.nd.contrib.Proposal or mx.nd.contrib.MultiProposal\n '''\n cls_prob, bbox_pred, im_info = get_new_data(batch_size, mx.cpu(0))\n rois_cpu, score_cpu = op(\n cls_score = cls_prob,\n bbox_pred = bbox_pred,\n im_info = im_info,\n feature_stride = feature_stride,\n scales = scales,\n ratios = ratios,\n rpn_pre_nms_top_n = rpn_pre_nms_top_n,\n rpn_post_nms_top_n = rpn_post_nms_top_n,\n threshold = threshold,\n rpn_min_size = rpn_min_size, output_score = True)\n\n gpu_ctx = mx.gpu(0)\n\n # copy data to gpu from cpu\n cls_prob_gpu = cls_prob.as_in_context(gpu_ctx)\n bbox_pred_gpu = bbox_pred.as_in_context(gpu_ctx)\n im_info_gpu = im_info.as_in_context(gpu_ctx)\n\n rois_gpu, score_gpu = op(\n cls_score = cls_prob_gpu,\n bbox_pred = bbox_pred_gpu,\n im_info = im_info_gpu,\n feature_stride = feature_stride,\n scales = scales,\n ratios = ratios,\n rpn_pre_nms_top_n = rpn_pre_nms_top_n,\n rpn_post_nms_top_n = rpn_post_nms_top_n,\n threshold = threshold,\n rpn_min_size = rpn_min_size, output_score = True)\n\n rois_cpu_np = rois_cpu.asnumpy()\n rois_gpu_np = rois_gpu.asnumpy()\n\n score_cpu_np = score_cpu.asnumpy()\n score_gpu_np = score_gpu.asnumpy()\n\n assert_almost_equal(score_cpu_np, score_gpu_np, atol = 1e-3, rtol = 1e-3)\n assert_almost_equal(rois_cpu_np, rois_gpu_np, atol = 1e-3, rtol = 1e-3)\n\n check_proposal_consistency(mx.nd.contrib.Proposal, 1)\n check_proposal_consistency(mx.nd.contrib.MultiProposal, 20)\n\n\n# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.\ndef kernel_error_check_imperative():\n os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'\n a = mx.nd.array([1,2,3],ctx=mx.gpu(0))\n b = mx.nd.array([],ctx=mx.gpu(0))\n c = (a / b).asnumpy()\n\ndef kernel_error_check_symbolic():\n os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'\n a = mx.sym.Variable('a')\n b = mx.sym.Variable('b')\n c = a / b\n f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),\n 'b':mx.nd.array([],ctx=mx.gpu(0))})\n f.forward()\n g = f.outputs[0].asnumpy()\n\ndef test_kernel_error_checking():\n # Running tests that may throw exceptions out of worker threads will stop CI testing\n # if not run in a separate process (with its own address space for CUDA compatibility).\n try:\n mpctx = mp.get_context('spawn')\n except:\n print('SKIP: python%s.%s lacks the required process fork-exec support ... 
' %\n sys.version_info[0:2], file=sys.stderr, end='')\n else:\n with discard_stderr():\n for f in [kernel_error_check_imperative, kernel_error_check_symbolic]:\n p = mpctx.Process(target=f)\n p.start()\n p.join()\n assert p.exitcode != 0,\\\n \"Expected a synchronous kernel error from %s(), none seen.\" % f.__name__\n\ndef test_incorrect_gpu():\n # Try setting dev_id to a really big number\n assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001))\n\n@with_seed()\ndef test_batchnorm_backwards_notrain():\n for ctx in [mx.cpu(0), mx.gpu(0)]:\n for cudnn_o in [False, True]:\n B,C,H,W = 4,3,2,2\n x = mx.nd.random.poisson(1,shape=(B,C,H,W)).as_in_context(ctx)\n gamma = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n beta = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n mean = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n std = mx.nd.random.normal(shape=(C)).as_in_context(ctx)\n x.attach_grad()\n\n with autograd.record(False):\n y = mx.ndarray.BatchNorm(x, gamma, beta, mean, std.square(),\n fix_gamma=False, cudnn_off=cudnn_o)\n loss=y.square().sum()\n loss.backward(train_mode=False)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n"
] |
[
[
"numpy.multiply",
"numpy.fft.fft",
"numpy.reshape",
"numpy.arange",
"numpy.dtype",
"numpy.ones",
"numpy.random.shuffle",
"numpy.fft.ifft",
"numpy.random.normal",
"numpy.append",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
LinjianMa/koala
|
[
"a366af5d109cbcec820827702a4ef7cb0a3a02f3"
] |
[
"experiments/graph_state.py"
] |
[
"import math\nimport tensorbackends\nimport numpy as np\nimport tracemalloc\n\nfrom koala import candecomp\nfrom collections import namedtuple\nfrom scipy import fft\n\nGate = namedtuple('Gate', ['name', 'parameters', 'qubits'])\n\n\ndef generate_circuit(nsite, edges):\n circuit = []\n for edge in edges:\n circuit.append(Gate('CZ', [], edge))\n return circuit\n\n\ndef graph_candecomp(qstate,\n edges,\n alsmode='als',\n rank_threshold=800,\n cp_tol=1e-5,\n cp_maxiter=60,\n cp_inneriter=20,\n init_als='random',\n debug=True):\n nsite = qstate.nsite\n circuit = generate_circuit(nsite, edges)\n if alsmode == \"als\":\n qstate.apply_circuit(circuit,\n rank_threshold=rank_threshold,\n cp_tol=cp_tol,\n cp_maxiter=cp_maxiter,\n cp_inneriter=cp_inneriter,\n init_als=init_als,\n num_als_init=100,\n debug=debug)\n elif alsmode == \"direct\":\n qstate.apply_circuit_direct_cpd(circuit,\n rank_threshold=rank_threshold,\n debug=debug)\n\n\ndef fidelity(out_vector, true_vector):\n return out_vector @ true_vector.conj() / (tb.norm(true_vector) *\n tb.norm(out_vector))\n\n\ndef relative_residual(out_vector, true_vector):\n return tb.norm(out_vector - true_vector) / tb.norm(true_vector)\n\n\ndef get_7_nodes_conjecture_state(backend):\n tb = tensorbackends.get(backend)\n\n zero = tb.astensor(np.asarray([1., 0.]))\n one = tb.astensor(np.asarray([0., 1.]))\n plus = tb.astensor(1. / np.sqrt(2) * np.asarray([1., 1.]))\n minus = tb.astensor(1. / np.sqrt(2) * np.asarray([1., -1.]))\n\n out1 = 1. / (2 * np.sqrt(2)) * tb.einsum(\n \"a,b,c,d,e,f,g->abcdefg\", minus, one, minus, zero, plus, zero, plus)\n out2 = -1. / (2) * tb.einsum(\"a,b,c,d,e,f,g->abcdefg\", one, minus, zero,\n plus, zero, minus, one)\n out3 = -1. / (2) * tb.einsum(\"a,b,c,d,e,f,g->abcdefg\", one, plus, one,\n plus, one, plus, one)\n out4 = 1. / (2 * np.sqrt(2)) * tb.einsum(\n \"a,b,c,d,e,f,g->abcdefg\", plus, zero, minus, one, minus, zero, plus)\n out5 = 1. / (2 * np.sqrt(2)) * tb.einsum(\n \"a,b,c,d,e,f,g->abcdefg\", plus, zero, minus, one, plus, one, minus)\n out6 = 1. / (2 * np.sqrt(2)) * tb.einsum(\n \"a,b,c,d,e,f,g->abcdefg\", plus, zero, plus, zero, minus, one, minus)\n out7 = -1. / (2) * tb.einsum(\"a,b,c,d,e,f,g->abcdefg\", one, plus, one,\n minus, zero, minus, one)\n out8 = 1. / (2 * np.sqrt(2)) * tb.einsum(\"a,b,c,d,e,f,g->abcdefg\", minus,\n one, plus, one, minus, zero, plus)\n out9 = -1. / (2) * tb.einsum(\"a,b,c,d,e,f,g->abcdefg\", one, minus, zero,\n minus, one, plus, one)\n out10 = 1. / (2 * np.sqrt(2)) * tb.einsum(\"a,b,c,d,e,f,g->abcdefg\", minus,\n one, plus, one, plus, one, minus)\n out11 = 1. / (2 * np.sqrt(2)) * tb.einsum(\n \"a,b,c,d,e,f,g->abcdefg\", plus, zero, plus, zero, plus, zero, plus)\n out12 = 1. 
/ (2 * np.sqrt(2)) * tb.einsum(\n \"a,b,c,d,e,f,g->abcdefg\", minus, one, minus, zero, minus, one, minus)\n\n out = out1 + out2 + out3 + out4 + out5 + out6 + out7 + out8 + out9 + out10 + out11 + out12\n return out.ravel()\n\n\ndef build_edges(nvertices, mode):\n edges = []\n if mode == 'ring':\n v = 0\n for v in range(nvertices // 2):\n edges.append([v * 2 + 1, v * 2])\n edges.append([v * 2 + 1, (v * 2 + 2) % nvertices])\n if nvertices % 2 != 0:\n edges.append([0, nvertices - 1])\n elif mode == 'line':\n for v in range(nvertices - 1):\n edges.append([v, v + 1])\n elif mode == 'double-line':\n for v in range(nvertices // 2 - 1):\n edges.append([v, v + 1])\n for v in range(nvertices // 2, nvertices - 1):\n edges.append([v, v + 1])\n return edges\n\n\nif __name__ == '__main__':\n backend = 'numpy'\n nvertices = 8\n debug = True\n rank_threshold = 16\n cp_tol = 1e-10\n cp_maxiter = 3000\n cp_inneriter = 20\n init_als = 'random'\n mode = 'double-line'\n alsmode = 'als'\n\n tb = tensorbackends.get(backend)\n\n # define qstate\n qstate1 = candecomp.uniform(nvertices, backend=backend)\n qstate2 = candecomp.uniform(nvertices, backend=backend)\n statevector = qstate1.get_statevector()\n print(statevector)\n\n edges = build_edges(nvertices, mode)\n\n tracemalloc.start()\n\n graph_candecomp(qstate1,\n edges,\n alsmode=alsmode,\n rank_threshold=rank_threshold,\n cp_tol=cp_tol,\n cp_maxiter=cp_maxiter,\n cp_inneriter=cp_inneriter,\n init_als=init_als,\n debug=debug)\n graph_candecomp(qstate2,\n edges,\n rank_threshold=1e10,\n cp_tol=cp_tol,\n cp_maxiter=cp_maxiter,\n cp_inneriter=cp_inneriter,\n init_als=init_als,\n debug=debug)\n\n current_memory, peak_memory = tracemalloc.get_traced_memory()\n tracemalloc.stop()\n print(f'current_memory is {current_memory / (1024 * 1024)} MB')\n print(f'peak_memory is {peak_memory / (1024 * 1024)} MB')\n\n out_statevector = qstate1.get_statevector().ravel()\n out_true = qstate2.get_statevector().ravel()\n # out_conjecture = get_7_nodes_conjecture_state(backend)\n\n print(out_statevector)\n print(out_true)\n # print(tb.norm(out_true - out_conjecture))\n print(qstate1.factors)\n\n print(\n f\"Relative residual norm is {relative_residual(out_statevector, out_true)}\"\n )\n print(f\"Fidelity is {fidelity(out_statevector, out_true)}\")\n print(f\"Fidelity lower bound is {qstate1.fidelity_lower}\")\n print(f\"Fidelity average is {qstate1.fidelity_avg}\")\n"
] |
[
[
"numpy.asarray",
"numpy.sqrt"
]
] |
aryoepigene/text
|
[
"943e370cb100e160bbf9809d807f0ac1032f6f19"
] |
[
"test/data/test_builtin_datasets.py"
] |
[
"#!/user/bin/env python3\n# Note that all the tests in this module require dataset (either network access or cached)\nimport os\nimport shutil\nimport torchtext.data as data\nfrom torchtext.datasets import AG_NEWS\nimport torch\nfrom torch.testing import assert_allclose\nfrom ..common.torchtext_test_case import TorchtextTestCase\n\n\ndef conditional_remove(f):\n if os.path.isfile(f):\n os.remove(f)\n elif os.path.isdir(f):\n shutil.rmtree(f)\n\n\nclass TestDataset(TorchtextTestCase):\n def test_wikitext2_legacy(self):\n from torchtext.datasets import WikiText2\n # smoke test to ensure wikitext2 works properly\n\n # NOTE\n # test_wikitext2 and test_wikitext2_legacy have some cache incompatibility.\n # Keeping one's cache make the other fail. So we need to clean up the cache dir\n cachedir = os.path.join(self.project_root, \".data\", \"wikitext-2\")\n conditional_remove(cachedir)\n\n ds = WikiText2\n TEXT = data.Field(lower=True, batch_first=True)\n train, valid, test = ds.splits(TEXT)\n TEXT.build_vocab(train)\n train_iter, valid_iter, test_iter = data.BPTTIterator.splits(\n (train, valid, test), batch_size=3, bptt_len=30)\n\n train_iter, valid_iter, test_iter = ds.iters(batch_size=4,\n bptt_len=30)\n\n conditional_remove(cachedir)\n\n def test_wikitext2(self):\n from torchtext.experimental.datasets import WikiText2\n # smoke test to ensure wikitext2 works properly\n\n # NOTE\n # test_wikitext2 and test_wikitext2_legacy have some cache incompatibility.\n # Keeping one's cache make the other fail. So we need to clean up the cache dir\n cachedir = os.path.join(self.project_root, \".data\", \"wikitext-2\")\n conditional_remove(cachedir)\n cachefile = os.path.join(self.project_root, \".data\", \"wikitext-2-v1.zip\")\n conditional_remove(cachefile)\n\n train_dataset, test_dataset, valid_dataset = WikiText2()\n self.assertEqual(len(train_dataset), 2049990)\n self.assertEqual(len(test_dataset), 241859)\n self.assertEqual(len(valid_dataset), 214417)\n\n vocab = train_dataset.get_vocab()\n tokens_ids = [vocab[token] for token in 'the player characters rest'.split()]\n self.assertEqual(tokens_ids, [2, 286, 503, 700])\n\n conditional_remove(cachedir)\n conditional_remove(cachefile)\n\n def test_penntreebank_legacy(self):\n from torchtext.datasets import PennTreebank\n # smoke test to ensure penn treebank works properly\n TEXT = data.Field(lower=True, batch_first=True)\n ds = PennTreebank\n train, valid, test = ds.splits(TEXT)\n TEXT.build_vocab(train)\n train_iter, valid_iter, test_iter = data.BPTTIterator.splits(\n (train, valid, test), batch_size=3, bptt_len=30)\n\n train_iter, valid_iter, test_iter = ds.iters(batch_size=4,\n bptt_len=30)\n\n def test_penntreebank(self):\n from torchtext.experimental.datasets import PennTreebank\n # smoke test to ensure wikitext2 works properly\n train_dataset, test_dataset, valid_dataset = PennTreebank()\n self.assertEqual(len(train_dataset), 924412)\n self.assertEqual(len(test_dataset), 82114)\n self.assertEqual(len(valid_dataset), 73339)\n\n vocab = train_dataset.get_vocab()\n tokens_ids = [vocab[token] for token in 'the player characters rest'.split()]\n self.assertEqual(tokens_ids, [2, 2550, 3344, 1125])\n\n def test_text_classification(self):\n # smoke test to ensure ag_news dataset works properly\n\n datadir = os.path.join(self.project_root, \".data\")\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n ag_news_train, ag_news_test = AG_NEWS(root=datadir, ngrams=3)\n self.assertEqual(len(ag_news_train), 120000)\n self.assertEqual(len(ag_news_test), 
7600)\n assert_allclose(ag_news_train[-1][1][:10],\n torch.tensor([3525, 319, 4053, 34, 5407, 3607, 70, 6798, 10599, 4053]).long())\n assert_allclose(ag_news_test[-1][1][:10],\n torch.tensor([2351, 758, 96, 38581, 2351, 220, 5, 396, 3, 14786]).long())\n\n def test_imdb(self):\n from torchtext.experimental.datasets import IMDB\n from torchtext.vocab import Vocab\n # smoke test to ensure imdb works properly\n train_dataset, test_dataset = IMDB()\n self.assertEqual(len(train_dataset), 25000)\n self.assertEqual(len(test_dataset), 25000)\n assert_allclose(train_dataset[0][1][:10],\n torch.tensor([13, 1568, 13, 246, 35468, 43, 64, 398, 1135, 92]).long())\n assert_allclose(train_dataset[-1][1][:10],\n torch.tensor([2, 71, 4555, 194, 3328, 15144, 42, 227, 148, 8]).long())\n assert_allclose(test_dataset[0][1][:10],\n torch.tensor([13, 125, 1051, 5, 246, 1652, 8, 277, 66, 20]).long())\n assert_allclose(test_dataset[-1][1][:10],\n torch.tensor([13, 1035, 14, 21, 28, 2, 1051, 1275, 1008, 3]).long())\n\n # Test API with a vocab input object\n old_vocab = train_dataset.get_vocab()\n new_vocab = Vocab(counter=old_vocab.freqs, max_size=2500)\n new_train_data, new_test_data = IMDB(vocab=new_vocab)\n"
] |
[
[
"torch.tensor"
]
] |
david-octo/annotation
|
[
"575c93ff2c2f226819e7bb76c99541d031bde677"
] |
[
"datumaro/tests/test_coco_format.py"
] |
[
"import numpy as np\nimport os.path as osp\n\nfrom unittest import TestCase\n\nfrom datumaro.components.project import Project\nfrom datumaro.components.extractor import (Extractor, DatasetItem,\n AnnotationType, Label, Mask, Points, Polygon, Bbox, Caption,\n LabelCategories, PointsCategories\n)\nfrom datumaro.plugins.coco_format.converter import (\n CocoConverter,\n CocoImageInfoConverter,\n CocoCaptionsConverter,\n CocoInstancesConverter,\n CocoPersonKeypointsConverter,\n CocoLabelsConverter,\n)\nfrom datumaro.plugins.coco_format.importer import CocoImporter\nfrom datumaro.util.image import Image\nfrom datumaro.util.test_utils import TestDir, compare_datasets\n\n\nDUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'coco_dataset')\n\nclass CocoImporterTest(TestCase):\n def test_can_import(self):\n class DstExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id='000000000001', image=np.ones((10, 5, 3)),\n subset='val', attributes={'id': 1},\n annotations=[\n Polygon([0, 0, 1, 0, 1, 2, 0, 2], label=0,\n id=1, group=1, attributes={'is_crowd': False}),\n Mask(np.array(\n [[1, 0, 0, 1, 0]] * 5 +\n [[1, 1, 1, 1, 0]] * 5\n ), label=0,\n id=2, group=2, attributes={'is_crowd': True}),\n ]\n ),\n ])\n\n def categories(self):\n label_cat = LabelCategories()\n label_cat.add('TEST')\n return { AnnotationType.label: label_cat }\n\n dataset = Project.import_from(DUMMY_DATASET_DIR, 'coco') \\\n .make_dataset()\n\n compare_datasets(self, DstExtractor(), dataset)\n\n def test_can_detect(self):\n self.assertTrue(CocoImporter.detect(DUMMY_DATASET_DIR))\n\nclass CocoConverterTest(TestCase):\n def _test_save_and_load(self, source_dataset, converter, test_dir,\n target_dataset=None, importer_args=None):\n converter(source_dataset, test_dir)\n\n if importer_args is None:\n importer_args = {}\n parsed_dataset = CocoImporter()(test_dir, **importer_args).make_dataset()\n\n if target_dataset is None:\n target_dataset = source_dataset\n\n compare_datasets(self, expected=target_dataset, actual=parsed_dataset)\n\n def test_can_save_and_load_captions(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train',\n annotations=[\n Caption('hello', id=1, group=1),\n Caption('world', id=2, group=2),\n ], attributes={'id': 1}),\n DatasetItem(id=2, subset='train',\n annotations=[\n Caption('test', id=3, group=3),\n ], attributes={'id': 2}),\n\n DatasetItem(id=3, subset='val',\n annotations=[\n Caption('word', id=1, group=1),\n ], attributes={'id': 1}\n ),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoCaptionsConverter(), test_dir)\n\n def test_can_save_and_load_instances(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n categories = { AnnotationType.label: label_categories }\n\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),\n annotations=[\n # Bbox + single polygon\n Bbox(0, 1, 2, 2,\n label=2, group=1, id=1,\n attributes={ 'is_crowd': False }),\n Polygon([0, 1, 2, 1, 2, 3, 0, 3],\n attributes={ 'is_crowd': False },\n label=2, group=1, id=1),\n ], attributes={'id': 1}),\n DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)),\n annotations=[\n # Mask + bbox\n Mask(np.array([\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 1, 1],\n [0, 0, 0, 0]],\n ),\n attributes={ 'is_crowd': True },\n label=4, group=3, id=3),\n Bbox(1, 0, 2, 2, label=4, group=3, id=3,\n attributes={ 
'is_crowd': True }),\n ], attributes={'id': 2}),\n\n DatasetItem(id=3, subset='val', image=np.ones((4, 4, 3)),\n annotations=[\n # Bbox + mask\n Bbox(0, 1, 2, 2, label=4, group=3, id=3,\n attributes={ 'is_crowd': True }),\n Mask(np.array([\n [0, 0, 0, 0],\n [1, 1, 1, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0]],\n ),\n attributes={ 'is_crowd': True },\n label=4, group=3, id=3),\n ], attributes={'id': 1}),\n ])\n\n def categories(self):\n return categories\n\n class DstExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),\n annotations=[\n Polygon([0, 1, 2, 1, 2, 3, 0, 3],\n attributes={ 'is_crowd': False },\n label=2, group=1, id=1),\n ], attributes={'id': 1}),\n DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)),\n annotations=[\n Mask(np.array([\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 1, 1],\n [0, 0, 0, 0]],\n ),\n attributes={ 'is_crowd': True },\n label=4, group=3, id=3),\n ], attributes={'id': 2}),\n\n DatasetItem(id=3, subset='val', image=np.ones((4, 4, 3)),\n annotations=[\n Mask(np.array([\n [0, 0, 0, 0],\n [1, 1, 1, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0]],\n ),\n attributes={ 'is_crowd': True },\n label=4, group=3, id=3),\n ], attributes={'id': 1}),\n ])\n\n def categories(self):\n return categories\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoInstancesConverter(), test_dir,\n target_dataset=DstExtractor())\n\n def test_can_merge_polygons_on_loading(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n categories = { AnnotationType.label: label_categories }\n\n class SrcExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((6, 10, 3)),\n annotations=[\n Polygon([0, 0, 4, 0, 4, 4],\n label=3, id=4, group=4),\n Polygon([5, 0, 9, 0, 5, 5],\n label=3, id=4, group=4),\n ]\n ),\n ])\n\n def categories(self):\n return categories\n\n class DstExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((6, 10, 3)),\n annotations=[\n Mask(np.array([\n [0, 1, 1, 1, 0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n # only internal fragment (without the border),\n # but not everywhere...\n ),\n label=3, id=4, group=4,\n attributes={ 'is_crowd': False }),\n ], attributes={'id': 1}\n ),\n ])\n\n def categories(self):\n return categories\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n CocoInstancesConverter(), test_dir,\n importer_args={'merge_instance_polygons': True},\n target_dataset=DstExtractor())\n\n def test_can_crop_covered_segments(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n\n class SrcTestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((5, 5, 3)),\n annotations=[\n Mask(np.array([\n [0, 0, 1, 1, 1],\n [0, 0, 1, 1, 1],\n [1, 1, 0, 1, 1],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0]],\n ),\n label=2, id=1, z_order=0),\n Polygon([1, 1, 4, 1, 4, 4, 1, 4],\n label=1, id=2, z_order=1),\n ]\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n class DstTestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((5, 5, 3)),\n annotations=[\n Mask(np.array([\n [0, 0, 1, 1, 1],\n [0, 0, 0, 0, 1],\n [1, 0, 0, 0, 1],\n [1, 0, 0, 0, 0],\n [1, 
1, 1, 0, 0]],\n ),\n attributes={ 'is_crowd': True },\n label=2, id=1, group=1),\n\n Polygon([1, 1, 4, 1, 4, 4, 1, 4],\n label=1, id=2, group=2,\n attributes={ 'is_crowd': False }),\n ], attributes={'id': 1}\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcTestExtractor(),\n CocoInstancesConverter(crop_covered=True), test_dir,\n target_dataset=DstTestExtractor())\n\n def test_can_convert_polygons_to_mask(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n\n class SrcTestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((6, 10, 3)),\n annotations=[\n Polygon([0, 0, 4, 0, 4, 4],\n label=3, id=4, group=4),\n Polygon([5, 0, 9, 0, 5, 5],\n label=3, id=4, group=4),\n ]\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n class DstTestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((6, 10, 3)),\n annotations=[\n Mask(np.array([\n [0, 1, 1, 1, 0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],\n # only internal fragment (without the border),\n # but not everywhere...\n ),\n attributes={ 'is_crowd': True },\n label=3, id=4, group=4),\n ], attributes={'id': 1}\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcTestExtractor(),\n CocoInstancesConverter(segmentation_mode='mask'), test_dir,\n target_dataset=DstTestExtractor())\n\n def test_can_convert_masks_to_polygons(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n\n class SrcExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((5, 10, 3)),\n annotations=[\n Mask(np.array([\n [0, 1, 1, 1, 0, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]),\n label=3, id=4, group=4),\n ]\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n class DstExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.zeros((5, 10, 3)),\n annotations=[\n Polygon(\n [3.0, 2.5, 1.0, 0.0, 3.5, 0.0, 3.0, 2.5],\n label=3, id=4, group=4,\n attributes={ 'is_crowd': False }),\n Polygon(\n [5.0, 3.5, 4.5, 0.0, 8.0, 0.0, 5.0, 3.5],\n label=3, id=4, group=4,\n attributes={ 'is_crowd': False }),\n ], attributes={'id': 1}\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n CocoInstancesConverter(segmentation_mode='polygons'), test_dir,\n target_dataset=DstExtractor())\n\n def test_can_save_and_load_images(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', attributes={'id': 1}),\n DatasetItem(id=2, subset='train', attributes={'id': 2}),\n\n DatasetItem(id=2, subset='val', attributes={'id': 2}),\n DatasetItem(id=3, subset='val', attributes={'id': 3}),\n DatasetItem(id=4, subset='val', attributes={'id': 4}),\n\n DatasetItem(id=5, subset='test', attributes={'id': 1}),\n ])\n\n with TestDir() as test_dir:\n 
self._test_save_and_load(TestExtractor(),\n CocoImageInfoConverter(), test_dir)\n\n def test_can_save_and_load_labels(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train',\n annotations=[\n Label(4, id=1, group=1),\n Label(9, id=2, group=2),\n ], attributes={'id': 1}\n ),\n ])\n\n def categories(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n return {\n AnnotationType.label: label_categories,\n }\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoLabelsConverter(), test_dir)\n\n def test_can_save_and_load_keypoints(self):\n label_categories = LabelCategories()\n points_categories = PointsCategories()\n for i in range(10):\n label_categories.add(str(i))\n points_categories.add(i, joints=[[0, 1], [1, 2]])\n categories = {\n AnnotationType.label: label_categories,\n AnnotationType.points: points_categories,\n }\n\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', image=np.zeros((5, 5, 3)),\n annotations=[\n # Full instance annotations: polygon + keypoints\n Points([0, 0, 0, 2, 4, 1], [0, 1, 2],\n label=3, group=1, id=1),\n Polygon([0, 0, 4, 0, 4, 4],\n label=3, group=1, id=1),\n\n # Full instance annotations: bbox + keypoints\n Points([1, 2, 3, 4, 2, 3], group=2, id=2),\n Bbox(1, 2, 2, 2, group=2, id=2),\n\n # Solitary keypoints\n Points([1, 2, 0, 2, 4, 1], label=5, id=3),\n\n # Some other solitary annotations (bug #1387)\n Polygon([0, 0, 4, 0, 4, 4], label=3, id=4),\n\n # Solitary keypoints with no label\n Points([0, 0, 1, 2, 3, 4], [0, 1, 2], id=5),\n ])\n ])\n\n def categories(self):\n return categories\n\n class DstTestExtractor(TestExtractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', image=np.zeros((5, 5, 3)),\n annotations=[\n Points([0, 0, 0, 2, 4, 1], [0, 1, 2],\n label=3, group=1, id=1,\n attributes={'is_crowd': False}),\n Polygon([0, 0, 4, 0, 4, 4],\n label=3, group=1, id=1,\n attributes={'is_crowd': False}),\n\n Points([1, 2, 3, 4, 2, 3],\n group=2, id=2,\n attributes={'is_crowd': False}),\n Polygon([1, 2, 3, 2, 3, 4, 1, 4],\n group=2, id=2,\n attributes={'is_crowd': False}),\n\n Points([1, 2, 0, 2, 4, 1],\n label=5, group=3, id=3,\n attributes={'is_crowd': False}),\n Polygon([0, 1, 4, 1, 4, 2, 0, 2],\n label=5, group=3, id=3,\n attributes={'is_crowd': False}),\n\n Points([0, 0, 1, 2, 3, 4], [0, 1, 2],\n group=5, id=5,\n attributes={'is_crowd': False}),\n Polygon([1, 2, 3, 2, 3, 4, 1, 4],\n group=5, id=5,\n attributes={'is_crowd': False}),\n ], attributes={'id': 1}),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoPersonKeypointsConverter(), test_dir,\n target_dataset=DstTestExtractor())\n\n def test_can_save_dataset_with_no_subsets(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, attributes={'id': 1}),\n DatasetItem(id=2, attributes={'id': 2}),\n ])\n\n def categories(self):\n return { AnnotationType.label: LabelCategories() }\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoConverter(), test_dir)\n\n def test_can_save_dataset_with_image_info(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=Image(path='1.jpg', size=(10, 15)),\n attributes={'id': 1}),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoConverter(tasks='image_info'), 
test_dir)\n\n def test_relative_paths(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id='1', image=np.ones((4, 2, 3)),\n attributes={'id': 1}),\n DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3)),\n attributes={'id': 2}),\n DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3)),\n attributes={'id': 3}),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoConverter(tasks='image_info', save_images=True), test_dir)\n\n def test_preserve_coco_ids(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id='some/name1', image=np.ones((4, 2, 3)),\n attributes={'id': 40}),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoConverter(tasks='image_info', save_images=True), test_dir)\n\n def test_annotation_attributes(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=np.ones((4, 2, 3)), annotations=[\n Polygon([0, 0, 4, 0, 4, 4], label=5, group=1, id=1,\n attributes={'is_crowd': False, 'x': 5, 'y': 'abc'}),\n ], attributes={'id': 1})\n ])\n\n def categories(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n return { AnnotationType.label: label_categories, }\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n CocoConverter(), test_dir)"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
]
] |
dgSPARSE/dgNN
|
[
"3f8f2642d10210b5dfcb4d91034b6c008dd2213c"
] |
[
"test/perf_test/train_gat_pyg_cpu.py"
] |
[
"from torch_geometric.nn import GATConv\nimport torch.nn.functional as F\nimport torch\nimport time\nimport argparse\nimport dgl\nimport GPUtil\nimport scipy.sparse as sp\n\nclass Net(torch.nn.Module):\n def __init__(self,\n edge_index,\n num_layers,\n in_dim,\n num_hidden,\n num_classes,\n heads,\n activation=None,\n feat_drop=0.,\n attn_drop=0.,\n negative_slope=0.2,\n residual=None):\n super().__init__()\n self.edge_index=edge_index\n self.num_layers = num_layers\n self.gat_layers = torch.nn.ModuleList()\n self.activation = activation\n # input projection (no residual)\n self.gat_layers.append(GATConv(\n in_dim, num_hidden, heads[0],negative_slope=negative_slope,dropout=attn_drop))\n # hidden layers\n for l in range(1, num_layers):\n # due to multi-head, the in_dim = num_hidden * num_heads\n self.gat_layers.append(GATConv(\n num_hidden * heads[l-1], num_hidden, heads[l],negative_slope=negative_slope,dropout=attn_drop))\n # output projection\n self.gat_layers.append(GATConv(\n num_hidden * heads[-2], num_classes, heads[-1],negative_slope=negative_slope,dropout=attn_drop))\n self.dropout = torch.nn.Dropout(feat_drop)\n\n\n def forward(self, x):\n h = x\n for l in range(self.num_layers):\n if l != 0:\n h = self.dropout(h)\n h = self.gat_layers[l](h,self.edge_index) # h.shape[-1] = num_heads*out_feats\n # output projection\n logits = self.gat_layers[-1](h,self.edge_index)\n return logits\n\ndef accuracy(logits, labels):\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels)\n\n\ndef load_dataset(args):\n if args.dataset == 'cora':\n data = dgl.data.CoraGraphDataset()\n elif args.dataset == 'citeseer':\n data = dgl.data.CiteseerGraphDataset()\n elif args.dataset == 'pubmed':\n data = dgl.data.PubmedGraphDataset()\n elif args.dataset == 'reddit':\n data = dgl.data.RedditDataset()\n else:\n raise ValueError('Unknown dataset: {}'.format(args.dataset))\n\n g = data[0]\n # add self loop\n g = dgl.remove_self_loop(g)\n g = dgl.add_self_loop(g)\n \n col,row=g.edges(order='srcdst')\n edge_idx=torch.stack((col,row))\n adj_csr = sp.csr_matrix((torch.ones(row.shape), (row, col)), shape=(g.num_nodes(), g.num_nodes()))\n \n row_ptr=torch.from_numpy(adj_csr.indptr)\n col_ind=torch.from_numpy(adj_csr.indices)\n\n adj_csc=adj_csr.tocsc()\n\n col_ptr=torch.from_numpy(adj_csc.indptr)\n row_ind=torch.from_numpy(adj_csc.indices)\n\n features=g.ndata['feat']\n labels=g.ndata['label']\n n_feats=features.shape[1]\n n_classes=data.num_labels\n train_mask = g.ndata['train_mask']\n test_mask = g.ndata['test_mask']\n\n return row_ptr,col_ind,col_ptr,row_ind,edge_idx,features,labels,n_feats,n_classes,train_mask,test_mask\n\ndef main(args):\n #load dataset\n row_ptr,col_ind,col_ptr,row_ind,edge_idx,features,labels,n_feats,n_classes,train_mask,test_mask=load_dataset(args)\n\n # row_ptr=row_ptr.to(args.gpu).int()\n # col_ind=col_ind.to(args.gpu).int()\n # col_ptr=col_ptr.to(args.gpu).int()\n # row_ind=row_ind.to(args.gpu).int()\n edge_idx=edge_idx.long()\n\n\n heads = ([args.n_heads] * args.n_layers) + [1]\n model=Net(edge_idx,args.n_layers,n_feats,args.n_hidden,n_classes,heads,attn_drop=args.attn_drop,feat_drop=args.dropout,negative_slope=args.negative_slope)\n loss_fcn = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n # print('warm up')\n # maxMemory = 0\n # for _ in range(10):\n # model.train()\n # logits = model(features)\n # loss = loss_fcn(logits[train_mask], 
labels[train_mask])\n # optimizer.zero_grad()\n # loss.backward()\n # optimizer.step() \n # GPUs = GPUtil.getGPUs()\n # maxMemory = max(GPUs[0].memoryUsed, maxMemory) \n \n print('profile training')\n model.train()\n # torch.cuda.synchronize()\n start=time.time()\n for _ in range(args.n_epochs):\n logits=model(features)\n loss=loss_fcn(logits[train_mask],labels[train_mask])\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \n print(loss.item())\n # torch.cuda.synchronize()\n end=time.time()\n train_time=(end-start)/args.n_epochs\n\n print('profile inference')\n model.eval()\n torch.cuda.synchronize()\n start=time.time()\n for epoch in range(args.n_epochs): \n with torch.no_grad():\n logits=model(features)\n torch.cuda.synchronize()\n end=time.time()\n inference_time=(end-start)/args.n_epochs\n \n logits = logits[test_mask] \n acc=accuracy(logits, labels[test_mask])\n print(\"Test Accuracy {:.4f}\".format(acc))\n print(f'max memory:{maxMemory}MB')\n print(\"train time:\",train_time)\n print(\"inference time:\",inference_time)\n\n if args.output!=None:\n with open(\"{}\".format(args.output),'a') as f:\n print(\"train_GAT_pyg,{} heads={} hidden_dim={},{:f}s,{:f}s,{}MB\".format(args.dataset,args.n_heads,args.n_hidden,train_time,inference_time,maxMemory),file=f)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='GAT')\n parser.add_argument(\"--dataset\",type=str,default=\"cora\")\n parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"which GPU to use. Set -1 to use CPU.\")\n parser.add_argument(\"--lr\", type=float, default=1e-3,\n help=\"learning rate\")\n parser.add_argument(\"--weight-decay\", type=float, default=5e-4,\n help=\"Weight for L2 loss\")\n parser.add_argument(\"--dropout\", type=float, default=0.5,\n help=\"dropout probability\")\n parser.add_argument(\"--n-epochs\", type=int, default=200,\n help=\"number of training epochs\")\n parser.add_argument(\"--n-hidden\", type=int, default=16,\n help=\"number of hidden gcn units\")\n parser.add_argument(\"--n-layers\", type=int, default=1,\n help=\"number of hidden gcn layers\")\n parser.add_argument(\"--n-heads\", type=int, default=1,\n help=\"number of hidden attention heads\")\n parser.add_argument('--negative-slope', type=float, default=0.2,\n help=\"the negative slope of leaky relu\")\n parser.add_argument('--attn-drop',type=float,default=0.,\n help=\"drop out rate for attention weights\")\n parser.add_argument('--output',type=str,default=None,\n help=\"output file\")\n \n args = parser.parse_args()\n print(args)\n main(args)"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.synchronize",
"torch.nn.Dropout",
"torch.max",
"torch.ones",
"torch.nn.ModuleList",
"torch.sum",
"torch.from_numpy",
"torch.no_grad",
"torch.stack"
]
] |
kopankom/models
|
[
"3f78f4cfd21c786c62bf321c07830071027ebb5e"
] |
[
"research/object_detection/utils/config_util_test.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for object_detection.utils.config_util.\"\"\"\n\nimport os\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\n\nfrom object_detection.protos import eval_pb2\nfrom object_detection.protos import image_resizer_pb2\nfrom object_detection.protos import input_reader_pb2\nfrom object_detection.protos import model_pb2\nfrom object_detection.protos import pipeline_pb2\nfrom object_detection.protos import train_pb2\nfrom object_detection.utils import config_util\n\n\ndef _write_config(config, config_path):\n \"\"\"Writes a config object to disk.\"\"\"\n config_text = text_format.MessageToString(config)\n with tf.gfile.Open(config_path, \"wb\") as f:\n f.write(config_text)\n\n\ndef _update_optimizer_with_constant_learning_rate(optimizer, learning_rate):\n \"\"\"Adds a new constant learning rate.\"\"\"\n constant_lr = optimizer.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n\n\ndef _update_optimizer_with_exponential_decay_learning_rate(\n optimizer, learning_rate):\n \"\"\"Adds a new exponential decay learning rate.\"\"\"\n exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate\n exponential_lr.initial_learning_rate = learning_rate\n\n\ndef _update_optimizer_with_manual_step_learning_rate(\n optimizer, initial_learning_rate, learning_rate_scaling):\n \"\"\"Adds a learning rate schedule.\"\"\"\n manual_lr = optimizer.learning_rate.manual_step_learning_rate\n manual_lr.initial_learning_rate = initial_learning_rate\n for i in range(3):\n schedule = manual_lr.schedule.add()\n schedule.learning_rate = initial_learning_rate * learning_rate_scaling**i\n\n\nclass ConfigUtilTest(tf.test.TestCase):\n\n def test_get_configs_from_pipeline_file(self):\n \"\"\"Test that proto configs can be read from pipeline config file.\"\"\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.faster_rcnn.num_classes = 10\n pipeline_config.train_config.batch_size = 32\n pipeline_config.train_input_reader.label_map_path = \"path/to/label_map\"\n pipeline_config.eval_config.num_examples = 20\n pipeline_config.eval_input_reader.queue_capacity = 100\n\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n self.assertProtoEquals(pipeline_config.model, configs[\"model\"])\n self.assertProtoEquals(pipeline_config.train_config,\n configs[\"train_config\"])\n self.assertProtoEquals(pipeline_config.train_input_reader,\n configs[\"train_input_config\"])\n self.assertProtoEquals(pipeline_config.eval_config,\n configs[\"eval_config\"])\n self.assertProtoEquals(pipeline_config.eval_input_reader,\n configs[\"eval_input_config\"])\n\n def 
test_create_pipeline_proto_from_configs(self):\n \"\"\"Tests that proto can be reconstructed from configs dictionary.\"\"\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.faster_rcnn.num_classes = 10\n pipeline_config.train_config.batch_size = 32\n pipeline_config.train_input_reader.label_map_path = \"path/to/label_map\"\n pipeline_config.eval_config.num_examples = 20\n pipeline_config.eval_input_reader.queue_capacity = 100\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n pipeline_config_reconstructed = (\n config_util.create_pipeline_proto_from_configs(configs))\n self.assertEqual(pipeline_config, pipeline_config_reconstructed)\n\n def test_get_configs_from_multiple_files(self):\n \"\"\"Tests that proto configs can be read from multiple files.\"\"\"\n temp_dir = self.get_temp_dir()\n\n # Write model config file.\n model_config_path = os.path.join(temp_dir, \"model.config\")\n model = model_pb2.DetectionModel()\n model.faster_rcnn.num_classes = 10\n _write_config(model, model_config_path)\n\n # Write train config file.\n train_config_path = os.path.join(temp_dir, \"train.config\")\n train_config = train_config = train_pb2.TrainConfig()\n train_config.batch_size = 32\n _write_config(train_config, train_config_path)\n\n # Write train input config file.\n train_input_config_path = os.path.join(temp_dir, \"train_input.config\")\n train_input_config = input_reader_pb2.InputReader()\n train_input_config.label_map_path = \"path/to/label_map\"\n _write_config(train_input_config, train_input_config_path)\n\n # Write eval config file.\n eval_config_path = os.path.join(temp_dir, \"eval.config\")\n eval_config = eval_pb2.EvalConfig()\n eval_config.num_examples = 20\n _write_config(eval_config, eval_config_path)\n\n # Write eval input config file.\n eval_input_config_path = os.path.join(temp_dir, \"eval_input.config\")\n eval_input_config = input_reader_pb2.InputReader()\n eval_input_config.label_map_path = \"path/to/another/label_map\"\n _write_config(eval_input_config, eval_input_config_path)\n\n configs = config_util.get_configs_from_multiple_files(\n model_config_path=model_config_path,\n train_config_path=train_config_path,\n train_input_config_path=train_input_config_path,\n eval_config_path=eval_config_path,\n eval_input_config_path=eval_input_config_path)\n self.assertProtoEquals(model, configs[\"model\"])\n self.assertProtoEquals(train_config, configs[\"train_config\"])\n self.assertProtoEquals(train_input_config,\n configs[\"train_input_config\"])\n self.assertProtoEquals(eval_config, configs[\"eval_config\"])\n self.assertProtoEquals(eval_input_config,\n configs[\"eval_input_config\"])\n\n def _assertOptimizerWithNewLearningRate(self, optimizer_name):\n \"\"\"Asserts successful updating of all learning rate schemes.\"\"\"\n original_learning_rate = 0.7\n learning_rate_scaling = 0.1\n hparams = tf.contrib.training.HParams(learning_rate=0.15)\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n # Constant learning rate.\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)\n _update_optimizer_with_constant_learning_rate(optimizer,\n original_learning_rate)\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = 
config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n optimizer = getattr(configs[\"train_config\"].optimizer, optimizer_name)\n constant_lr = optimizer.learning_rate.constant_learning_rate\n self.assertAlmostEqual(hparams.learning_rate, constant_lr.learning_rate)\n\n # Exponential decay learning rate.\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)\n _update_optimizer_with_exponential_decay_learning_rate(\n optimizer, original_learning_rate)\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n optimizer = getattr(configs[\"train_config\"].optimizer, optimizer_name)\n exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate\n self.assertAlmostEqual(hparams.learning_rate,\n exponential_lr.initial_learning_rate)\n\n # Manual step learning rate.\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name)\n _update_optimizer_with_manual_step_learning_rate(\n optimizer, original_learning_rate, learning_rate_scaling)\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n optimizer = getattr(configs[\"train_config\"].optimizer, optimizer_name)\n manual_lr = optimizer.learning_rate.manual_step_learning_rate\n self.assertAlmostEqual(hparams.learning_rate,\n manual_lr.initial_learning_rate)\n for i, schedule in enumerate(manual_lr.schedule):\n self.assertAlmostEqual(hparams.learning_rate * learning_rate_scaling**i,\n schedule.learning_rate)\n\n def testRMSPropWithNewLearingRate(self):\n \"\"\"Tests new learning rates for RMSProp Optimizer.\"\"\"\n self._assertOptimizerWithNewLearningRate(\"rms_prop_optimizer\")\n\n def testMomentumOptimizerWithNewLearningRate(self):\n \"\"\"Tests new learning rates for Momentum Optimizer.\"\"\"\n self._assertOptimizerWithNewLearningRate(\"momentum_optimizer\")\n\n def testAdamOptimizerWithNewLearningRate(self):\n \"\"\"Tests new learning rates for Adam Optimizer.\"\"\"\n self._assertOptimizerWithNewLearningRate(\"adam_optimizer\")\n\n def testNewBatchSize(self):\n \"\"\"Tests that batch size is updated appropriately.\"\"\"\n original_batch_size = 2\n hparams = tf.contrib.training.HParams(batch_size=16)\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.train_config.batch_size = original_batch_size\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n new_batch_size = configs[\"train_config\"].batch_size\n self.assertEqual(16, new_batch_size)\n\n def testNewBatchSizeWithClipping(self):\n \"\"\"Tests that batch size is clipped to 1 from below.\"\"\"\n original_batch_size = 2\n hparams = tf.contrib.training.HParams(batch_size=0.5)\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.train_config.batch_size = 
original_batch_size\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n new_batch_size = configs[\"train_config\"].batch_size\n self.assertEqual(1, new_batch_size) # Clipped to 1.0.\n\n def testNewMomentumOptimizerValue(self):\n \"\"\"Tests that new momentum value is updated appropriately.\"\"\"\n original_momentum_value = 0.4\n hparams = tf.contrib.training.HParams(momentum_optimizer_value=1.1)\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer\n optimizer_config.momentum_optimizer_value = original_momentum_value\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n new_momentum_value = optimizer_config.momentum_optimizer_value\n self.assertAlmostEqual(1.0, new_momentum_value) # Clipped to 1.0.\n\n def testNewClassificationLocalizationWeightRatio(self):\n \"\"\"Tests that the loss weight ratio is updated appropriately.\"\"\"\n original_localization_weight = 0.1\n original_classification_weight = 0.2\n new_weight_ratio = 5.0\n hparams = tf.contrib.training.HParams(\n classification_localization_weight_ratio=new_weight_ratio)\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.ssd.loss.localization_weight = (\n original_localization_weight)\n pipeline_config.model.ssd.loss.classification_weight = (\n original_classification_weight)\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n loss = configs[\"model\"].ssd.loss\n self.assertAlmostEqual(1.0, loss.localization_weight)\n self.assertAlmostEqual(new_weight_ratio, loss.classification_weight)\n\n def testNewFocalLossParameters(self):\n \"\"\"Tests that the loss weight ratio is updated appropriately.\"\"\"\n original_alpha = 1.0\n original_gamma = 1.0\n new_alpha = 0.3\n new_gamma = 2.0\n hparams = tf.contrib.training.HParams(\n focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma)\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n classification_loss = pipeline_config.model.ssd.loss.classification_loss\n classification_loss.weighted_sigmoid_focal.alpha = original_alpha\n classification_loss.weighted_sigmoid_focal.gamma = original_gamma\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(configs, hparams)\n classification_loss = configs[\"model\"].ssd.loss.classification_loss\n self.assertAlmostEqual(new_alpha,\n classification_loss.weighted_sigmoid_focal.alpha)\n self.assertAlmostEqual(new_gamma,\n classification_loss.weighted_sigmoid_focal.gamma)\n\n def testMergingKeywordArguments(self):\n \"\"\"Tests that keyword arguments get merged as do hyperparameters.\"\"\"\n 
original_num_train_steps = 100\n original_num_eval_steps = 5\n desired_num_train_steps = 10\n desired_num_eval_steps = 1\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.train_config.num_steps = original_num_train_steps\n pipeline_config.eval_config.num_examples = original_num_eval_steps\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(\n configs,\n train_steps=desired_num_train_steps,\n eval_steps=desired_num_eval_steps)\n train_steps = configs[\"train_config\"].num_steps\n eval_steps = configs[\"eval_config\"].num_examples\n self.assertEqual(desired_num_train_steps, train_steps)\n self.assertEqual(desired_num_eval_steps, eval_steps)\n\n def testGetNumberOfClasses(self):\n \"\"\"Tests that number of classes can be retrieved.\"\"\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n pipeline_config.model.faster_rcnn.num_classes = 20\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n number_of_classes = config_util.get_number_of_classes(configs[\"model\"])\n self.assertEqual(20, number_of_classes)\n\n def testNewTrainInputPath(self):\n \"\"\"Tests that train input path can be overwritten with single file.\"\"\"\n original_train_path = [\"path/to/data\"]\n new_train_path = \"another/path/to/data\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n reader_config = pipeline_config.train_input_reader.tf_record_input_reader\n reader_config.input_path.extend(original_train_path)\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(\n configs, train_input_path=new_train_path)\n reader_config = configs[\"train_input_config\"].tf_record_input_reader\n final_path = reader_config.input_path\n self.assertEqual([new_train_path], final_path)\n\n def testNewTrainInputPathList(self):\n \"\"\"Tests that train input path can be overwritten with multiple files.\"\"\"\n original_train_path = [\"path/to/data\"]\n new_train_path = [\"another/path/to/data\", \"yet/another/path/to/data\"]\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n reader_config = pipeline_config.train_input_reader.tf_record_input_reader\n reader_config.input_path.extend(original_train_path)\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(\n configs, train_input_path=new_train_path)\n reader_config = configs[\"train_input_config\"].tf_record_input_reader\n final_path = reader_config.input_path\n self.assertEqual(new_train_path, final_path)\n\n def testNewLabelMapPath(self):\n \"\"\"Tests that label map path can be overwritten in input readers.\"\"\"\n original_label_map_path = \"path/to/original/label_map\"\n new_label_map_path = \"path//to/new/label_map\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = 
pipeline_pb2.TrainEvalPipelineConfig()\n train_input_reader = pipeline_config.train_input_reader\n train_input_reader.label_map_path = original_label_map_path\n eval_input_reader = pipeline_config.eval_input_reader\n eval_input_reader.label_map_path = original_label_map_path\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(\n configs, label_map_path=new_label_map_path)\n self.assertEqual(new_label_map_path,\n configs[\"train_input_config\"].label_map_path)\n self.assertEqual(new_label_map_path,\n configs[\"eval_input_config\"].label_map_path)\n\n def testDontOverwriteEmptyLabelMapPath(self):\n \"\"\"Tests that label map path will not by overwritten with empty string.\"\"\"\n original_label_map_path = \"path/to/original/label_map\"\n new_label_map_path = \"\"\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n train_input_reader = pipeline_config.train_input_reader\n train_input_reader.label_map_path = original_label_map_path\n eval_input_reader = pipeline_config.eval_input_reader\n eval_input_reader.label_map_path = original_label_map_path\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(\n configs, label_map_path=new_label_map_path)\n self.assertEqual(original_label_map_path,\n configs[\"train_input_config\"].label_map_path)\n self.assertEqual(original_label_map_path,\n configs[\"eval_input_config\"].label_map_path)\n\n def testNewMaskType(self):\n \"\"\"Tests that mask type can be overwritten in input readers.\"\"\"\n original_mask_type = input_reader_pb2.NUMERICAL_MASKS\n new_mask_type = input_reader_pb2.PNG_MASKS\n pipeline_config_path = os.path.join(self.get_temp_dir(), \"pipeline.config\")\n\n pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\n train_input_reader = pipeline_config.train_input_reader\n train_input_reader.mask_type = original_mask_type\n eval_input_reader = pipeline_config.eval_input_reader\n eval_input_reader.mask_type = original_mask_type\n _write_config(pipeline_config, pipeline_config_path)\n\n configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)\n configs = config_util.merge_external_params_with_configs(\n configs, mask_type=new_mask_type)\n self.assertEqual(new_mask_type, configs[\"train_input_config\"].mask_type)\n self.assertEqual(new_mask_type, configs[\"eval_input_config\"].mask_type)\n\n def test_get_image_resizer_config(self):\n \"\"\"Tests that number of classes can be retrieved.\"\"\"\n model_config = model_pb2.DetectionModel()\n model_config.faster_rcnn.image_resizer.fixed_shape_resizer.height = 100\n model_config.faster_rcnn.image_resizer.fixed_shape_resizer.width = 300\n image_resizer_config = config_util.get_image_resizer_config(model_config)\n self.assertEqual(image_resizer_config.fixed_shape_resizer.height, 100)\n self.assertEqual(image_resizer_config.fixed_shape_resizer.width, 300)\n\n def test_get_spatial_image_size_from_fixed_shape_resizer_config(self):\n image_resizer_config = image_resizer_pb2.ImageResizer()\n image_resizer_config.fixed_shape_resizer.height = 100\n image_resizer_config.fixed_shape_resizer.width = 200\n image_shape = config_util.get_spatial_image_size(image_resizer_config)\n self.assertAllEqual(image_shape, [100, 200])\n\n def 
test_get_spatial_image_size_from_aspect_preserving_resizer_config(self):\n image_resizer_config = image_resizer_pb2.ImageResizer()\n image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100\n image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600\n image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension = True\n image_shape = config_util.get_spatial_image_size(image_resizer_config)\n self.assertAllEqual(image_shape, [600, 600])\n\n def test_get_spatial_image_size_from_aspect_preserving_resizer_dynamic(self):\n image_resizer_config = image_resizer_pb2.ImageResizer()\n image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100\n image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600\n image_shape = config_util.get_spatial_image_size(image_resizer_config)\n self.assertAllEqual(image_shape, [-1, -1])\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.contrib.training.HParams",
"tensorflow.test.main",
"tensorflow.gfile.Open"
]
] |
rupc/bsp-protos
|
[
"58833e7ab9ff53f3633708fb5f95edfdd152c5ea"
] |
[
"sample-demo/venv/Lib/site-packages/PySide6/examples/external/matplotlib/widget3d/widget3d.py"
] |
[
"#############################################################################\n##\n## Copyright (C) 2020 The Qt Company Ltd.\n## Contact: https://www.qt.io/licensing/\n##\n## This file is part of the Qt for Python examples of the Qt Toolkit.\n##\n## $QT_BEGIN_LICENSE:BSD$\n## You may use this file under the terms of the BSD license as follows:\n##\n## \"Redistribution and use in source and binary forms, with or without\n## modification, are permitted provided that the following conditions are\n## met:\n## * Redistributions of source code must retain the above copyright\n## notice, this list of conditions and the following disclaimer.\n## * Redistributions in binary form must reproduce the above copyright\n## notice, this list of conditions and the following disclaimer in\n## the documentation and/or other materials provided with the\n## distribution.\n## * Neither the name of The Qt Company Ltd nor the names of its\n## contributors may be used to endorse or promote products derived\n## from this software without specific prior written permission.\n##\n##\n## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n## \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\"\n##\n## $QT_END_LICENSE$\n##\n#############################################################################\n\nimport sys\n\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.mplot3d import axes3d\nfrom PySide6.QtCore import Qt, Slot\nfrom PySide6.QtGui import QAction, QKeySequence\nfrom PySide6.QtWidgets import (QApplication, QComboBox, QHBoxLayout,\n QHeaderView, QLabel, QMainWindow, QSlider,\n QTableWidget, QTableWidgetItem, QVBoxLayout,\n QWidget)\n\n\n\"\"\"This example implements the interaction between Qt Widgets and a 3D\nmatplotlib plot\"\"\"\n\n\nclass ApplicationWindow(QMainWindow):\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n\n self.column_names = [\"Column A\", \"Column B\", \"Column C\"]\n\n # Central widget\n self._main = QWidget()\n self.setCentralWidget(self._main)\n\n # Main menu bar\n self.menu = self.menuBar()\n self.menu_file = self.menu.addMenu(\"File\")\n exit = QAction(\"Exit\", self, triggered=qApp.quit)\n self.menu_file.addAction(exit)\n\n self.menu_about = self.menu.addMenu(\"&About\")\n about = QAction(\"About Qt\", self, shortcut=QKeySequence(QKeySequence.HelpContents),\n triggered=qApp.aboutQt)\n self.menu_about.addAction(about)\n\n # Figure (Left)\n self.fig = Figure(figsize=(5, 3))\n self.canvas = FigureCanvas(self.fig)\n\n # Sliders (Left)\n min = 0\n max = 360\n self.slider_azim = QSlider(minimum=min, maximum=max, orientation=Qt.Horizontal)\n self.slider_elev = QSlider(minimum=min, maximum=max, orientation=Qt.Horizontal)\n\n self.slider_azim_layout = QHBoxLayout()\n 
self.slider_azim_layout.addWidget(QLabel(f\"{min}\"))\n self.slider_azim_layout.addWidget(self.slider_azim)\n self.slider_azim_layout.addWidget(QLabel(f\"{max}\"))\n\n self.slider_elev_layout = QHBoxLayout()\n self.slider_elev_layout.addWidget(QLabel(f\"{min}\"))\n self.slider_elev_layout.addWidget(self.slider_elev)\n self.slider_elev_layout.addWidget(QLabel(f\"{max}\"))\n\n # Table (Right)\n self.table = QTableWidget()\n header = self.table.horizontalHeader()\n header.setSectionResizeMode(QHeaderView.Stretch)\n\n # ComboBox (Right)\n self.combo = QComboBox()\n self.combo.addItems([\"Wired\", \"Surface\", \"Triangular Surface\", \"Sphere\"])\n\n # Right layout\n rlayout = QVBoxLayout()\n rlayout.setContentsMargins(1, 1, 1, 1)\n rlayout.addWidget(QLabel(\"Plot type:\"))\n rlayout.addWidget(self.combo)\n rlayout.addWidget(self.table)\n\n # Left layout\n llayout = QVBoxLayout()\n rlayout.setContentsMargins(1, 1, 1, 1)\n llayout.addWidget(self.canvas, 88)\n llayout.addWidget(QLabel(\"Azimuth:\"), 1)\n llayout.addLayout(self.slider_azim_layout, 5)\n llayout.addWidget(QLabel(\"Elevation:\"), 1)\n llayout.addLayout(self.slider_elev_layout, 5)\n\n # Main layout\n layout = QHBoxLayout(self._main)\n layout.addLayout(llayout, 70)\n layout.addLayout(rlayout, 30)\n\n # Signal and Slots connections\n self.combo.currentTextChanged.connect(self.combo_option)\n self.slider_azim.valueChanged.connect(self.rotate_azim)\n self.slider_elev.valueChanged.connect(self.rotate_elev)\n\n # Initial setup\n self.plot_wire()\n self._ax.view_init(30, 30)\n self.slider_azim.setValue(30)\n self.slider_elev.setValue(30)\n self.fig.canvas.mpl_connect(\"button_release_event\", self.on_click)\n\n # Matplotlib slot method\n def on_click(self, event):\n azim, elev = self._ax.azim, self._ax.elev\n self.slider_azim.setValue(azim + 180)\n self.slider_elev.setValue(elev + 180)\n\n # Utils methods\n\n def set_table_data(self, X, Y, Z):\n for i in range(len(X)):\n self.table.setItem(i, 0, QTableWidgetItem(f\"{X[i]:.2f}\"))\n self.table.setItem(i, 1, QTableWidgetItem(f\"{Y[i]:.2f}\"))\n self.table.setItem(i, 2, QTableWidgetItem(f\"{Z[i]:.2f}\"))\n\n def set_canvas_table_configuration(self, row_count, data):\n self.fig.set_canvas(self.canvas)\n self._ax = self.canvas.figure.add_subplot(projection=\"3d\")\n\n self._ax.set_xlabel(self.column_names[0])\n self._ax.set_ylabel(self.column_names[1])\n self._ax.set_zlabel(self.column_names[2])\n\n self.table.setRowCount(row_count)\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(self.column_names)\n self.set_table_data(data[0], data[1], data[2])\n\n # Plot methods\n\n def plot_wire(self):\n # Data\n self.X, self.Y, self.Z = axes3d.get_test_data(0.03)\n\n self.set_canvas_table_configuration(len(self.X[0]), (self.X[0], self.Y[0], self.Z[0]))\n self._ax.plot_wireframe(self.X, self.Y, self.Z, rstride=10, cstride=10, cmap=\"viridis\")\n self.canvas.draw()\n\n def plot_surface(self):\n # Data\n self.X, self.Y = np.meshgrid(np.linspace(-6, 6, 30), np.linspace(-6, 6, 30))\n self.Z = np.sin(np.sqrt(self.X ** 2 + self.Y ** 2))\n\n self.set_canvas_table_configuration(len(self.X[0]), (self.X[0], self.Y[0], self.Z[0]))\n self._ax.plot_surface(self.X, self.Y, self.Z,\n rstride=1, cstride=1, cmap=\"viridis\", edgecolor=\"none\")\n self.canvas.draw()\n\n def plot_triangular_surface(self):\n # Data\n radii = np.linspace(0.125, 1.0, 8)\n angles = np.linspace(0, 2 * np.pi, 36, endpoint=False)[..., np.newaxis]\n self.X = np.append(0, (radii * np.cos(angles)).flatten())\n self.Y = np.append(0, 
(radii * np.sin(angles)).flatten())\n self.Z = np.sin(-self.X * self.Y)\n\n self.set_canvas_table_configuration(len(self.X), (self.X, self.Y, self.Z))\n self._ax.plot_trisurf(self.X, self.Y, self.Z, linewidth=0.2, antialiased=True)\n self.canvas.draw()\n\n def plot_sphere(self):\n # Data\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n self.X = 10 * np.outer(np.cos(u), np.sin(v))\n self.Y = 10 * np.outer(np.sin(u), np.sin(v))\n self.Z = 9 * np.outer(np.ones(np.size(u)), np.cos(v))\n\n self.set_canvas_table_configuration(len(self.X), (self.X[0], self.Y[0], self.Z[0]))\n self._ax.plot_surface(self.X, self.Y, self.Z)\n self.canvas.draw()\n\n # Slots\n\n @Slot()\n def combo_option(self, text):\n if text == \"Wired\":\n self.plot_wire()\n elif text == \"Surface\":\n self.plot_surface()\n elif text == \"Triangular Surface\":\n self.plot_triangular_surface()\n elif text == \"Sphere\":\n self.plot_sphere()\n\n @Slot()\n def rotate_azim(self, value):\n self._ax.view_init(self._ax.elev, value)\n self.fig.set_canvas(self.canvas)\n self.canvas.draw()\n\n @Slot()\n def rotate_elev(self, value):\n self._ax.view_init(value, self._ax.azim)\n self.fig.set_canvas(self.canvas)\n self.canvas.draw()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n w = ApplicationWindow()\n w.setFixedSize(1280, 720)\n w.show()\n app.exec()\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_qt5agg.FigureCanvas",
"numpy.cos",
"numpy.sin",
"numpy.size"
]
] |
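A minimal, hedged sketch of the core interaction in the widget3d example above: a QSlider whose valueChanged signal drives Axes3D.view_init followed by a canvas redraw. It assumes PySide6 and a recent matplotlib (the qtagg backend is used here; the recorded file's backend_qt5agg import also works in recent matplotlib). The layout and the fixed elevation of 30 are illustrative choices, not taken from the file.

```python
import sys

import numpy as np
from matplotlib.backends.backend_qtagg import FigureCanvas
from matplotlib.figure import Figure
from PySide6.QtCore import Qt
from PySide6.QtWidgets import QApplication, QSlider, QVBoxLayout, QWidget

app = QApplication(sys.argv)
window = QWidget()

fig = Figure(figsize=(5, 3))
canvas = FigureCanvas(fig)
ax = fig.add_subplot(projection="3d")

# A simple surface, analogous to plot_surface() in the example above.
X, Y = np.meshgrid(np.linspace(-6, 6, 30), np.linspace(-6, 6, 30))
ax.plot_surface(X, Y, np.sin(np.sqrt(X**2 + Y**2)))

# The slider controls the azimuth; elevation stays fixed, mirroring rotate_azim().
slider = QSlider(minimum=0, maximum=360, orientation=Qt.Horizontal)

def rotate(value):
    ax.view_init(30, value)  # (elev, azim)
    canvas.draw()

slider.valueChanged.connect(rotate)

layout = QVBoxLayout(window)
layout.addWidget(canvas)
layout.addWidget(slider)
window.show()
app.exec()
```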
Zepyhrus/hie
|
[
"4040f36507c195c405c652e0c7b47fe582925c05"
] |
[
"hie/hieval.py"
] |
[
"import datetime\nimport time\nfrom collections import defaultdict\nimport copy\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment as KM\n\nimport cv2\n\nfrom .tools import iou, imread, BLUE, GREEN, RED, YELLOW\n\n\nclass HIEParams(object):\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.arange(50, 100, 5) / 100\n self.recThrs = np.arange( 0, 101) / 100\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.arange(50, 100, 5) / 100\n self.recThrs = np.arange( 0, 101) / 100\n self.maxDets = [256]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None\n\n if iouType == 'keypoints':\n self.kpt_oks_sigmas = np.array([79, 79, 71, 72, 62, 79, 72, 62, 107, 87, 89, 107, 87, 89]) * 1.0 / 1000\n\n def info(self):\n for key, value in vars(self).items():\n print(f'{key}: {value}')\n\n\nclass HIEval(object):\n \"\"\"\n \"\"\"\n def __init__(self, cocoGt=None, cocoDt=None, iouType='keypoints'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n # TODO: add evaluation for other type of iouType\n assert iouType in ['bbox', 'keypoints'], 'Other type of evaluation is not supported yet!'\n\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.params = {} # evaluation parameters\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = HIEParams(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n\n \n self.iou_type = iouType\n self.MATCH = defaultdict(list)\n self.__evaluated = False\n\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # 
convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n self.ious = {\n (imgId, catId): self.computeOKS(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds\n }\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [\n evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeOKS(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n \n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = np.zeros((len(dt), len(gt)))\n\n for i, d in enumerate(dt):\n for j, g in enumerate(gt):\n ious[i, j] = self.oks(g, d, method=p.iouType)\n\n return ious\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed 
ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e 
in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n \n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def 
_summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=self.params.maxDets[0])\n stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.5)\n stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=.75)\n stats[3] = _summarize(1, maxDets=self.params.maxDets[0], areaRng='medium')\n stats[4] = _summarize(1, maxDets=self.params.maxDets[0], areaRng='large')\n stats[5] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=.5)\n stats[7] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=.75)\n stats[8] = _summarize(0, maxDets=self.params.maxDets[0], areaRng='medium')\n stats[9] = _summarize(0, maxDets=self.params.maxDets[0], areaRng='large')\n return stats\n\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n\n self.stats = summarize()\n\n def new_eval(self):\n \"\"\"\n Custom evaluation using KM algorithm, generating potential matches\n \"\"\"\n # load_res ensures images in dt is subset of gt\n gt = self.cocoGt\n dt = self.cocoDt\n img_ids = gt.getImgIds()\n\n print('----------------- start evaluating -------------------------')\n for img_id in tqdm(img_ids):\n gts = gt.loadAnns(gt.getAnnIds([img_id]))\n dts = dt.loadAnns(dt.getAnnIds([img_id]))\n\n if len(gts) == 0 or len(dts) == 0:\n self.MATCH[img_id] = []\n continue\n\n self.MATCH[img_id] = self.metric_by_frame(gts, dts, self.iou_type)\n print('--------------- evaluation finished! -----------------------')\n self.__evaluated = True\n\n def __check_evaluated(self):\n if not self.__evaluated:\n print('[WRN] current dataset not evaluated, start evaluation.')\n self.new_eval()\n\n def new_summ(self, img_ids=[]):\n \"\"\"\n Summarize using new evaluation method\n \"\"\"\n self.__check_evaluated()\n \n if len(img_ids) == 0:\n img_ids = self.cocoGt.getImgIds()\n \n # initialize summary matrix\n _summary = {}\n for i in range(10):\n # with respect to iou from 0.5 to 0.95\n _summary[str(50 + 5 * i)] = {\n 'TP': 0, # true Positive\n 'FP': 0, # mAP\n 'FN': 0, # mAR\n }\n _summary['avg'] = {'TP': 0, 'FP': 0, 'FN': 0}\n\n for img_id in img_ids:\n _match = self.MATCH[img_id]\n num_gts = len(self.cocoGt.getAnnIds([img_id]))\n num_dts = len(self.cocoDt.getAnnIds([img_id]))\n\n for i in range(10):\n _thresh = 0.5 + 0.05 * i\n\n _tp = len([_ for _ in _match if _['oks'] >= _thresh])\n _fp = num_dts - _tp\n _fn = num_gts - _tp\n\n assert _tp >= 0 and _fp >= 0 and _fn >= 0, 'Debug here!'\n\n _summary[str(50 + 5 * i)]['TP'] += _tp\n _summary[str(50 + 5 * i)]['FP'] += _fp\n _summary[str(50 + 5 * i)]['FN'] += _fn\n\n _summary['avg']['TP'] += _tp\n _summary['avg']['FP'] += _fp\n _summary['avg']['FN'] += _fn\n \n _mAP = _summary['avg']['TP'] / ( _summary['avg']['FP'] + _summary['avg']['TP'] + 1e-6) * 100\n _mAP_5 = _summary['50']['TP'] / ( _summary['50']['FP'] + _summary['50']['TP'] + 1e-6) * 100\n _mAP_75 = _summary['75']['TP'] / ( _summary['75']['FP'] + _summary['75']['TP'] + 1e-6) * 100\n _mAP_90 = _summary['90']['TP'] / ( _summary['90']['FP'] + _summary['90']['TP'] + 1e-6) * 100\n _mAR = _summary['avg']['TP'] / ( _summary['avg']['FN'] + _summary['avg']['TP'] + 1e-6) * 100\n _mAR_5 = _summary['50']['TP'] / ( _summary['50']['FN'] + _summary['50']['TP'] + 1e-6) * 100\n _mAR_75 = _summary['75']['TP'] / ( _summary['75']['FN'] + _summary['75']['TP'] + 1e-6) * 100\n _mAR_90 = 
_summary['90']['TP'] / ( _summary['90']['FN'] + _summary['90']['TP'] + 1e-6) * 100\n\n _mf1 = 2 * _mAP * _mAR / (_mAP + _mAR + 1e-6)\n\n msg = f'mAP: {_mAP:.2f} mAR: {_mAR:.2f} mF1: {_mf1:.2f}'\n\n return msg, _summary\n\n def new_combine(self, prob=0.1):\n self.__check_evaluated()\n\n img_ids = sorted(self.cocoGt.getImgIds())\n combined_anns = []\n\n # smooth correct detection\n for img_id in tqdm(img_ids):\n _match = self.MATCH[img_id]\n\n _matched_gt = [_['gt_id'] for _ in _match]\n _matched_dt = [_['dt_id'] for _ in _match]\n _matched_score = [_['oks'] for _ in _match]\n\n for _gi, _di in zip(_matched_gt, _matched_dt):\n gt = self.cocoGt.loadAnns([_gi])[0]\n dt = self.cocoDt.loadAnns([_di])[0]\n\n combined_ann = copy.deepcopy(gt)\n combined_ann['bbox'] = [(g+d)/2 for g, d in zip(gt['bbox'], dt['bbox'])]\n\n combined_anns.append(combined_ann)\n \n # add false negtives\n for gt in self.cocoGt.loadAnns(self.cocoGt.getAnnIds([img_id])):\n if gt['id'] in _matched_gt:\n continue\n\n combined_anns.append(gt)\n\n # add false positives\n for dt in self.cocoDt.loadAnns(self.cocoDt.getAnnIds([img_id])):\n if dt['id'] in _matched_dt:\n continue\n\n if np.random.rand() <= prob:\n combined_anns.append(dt)\n\n return self.cocoGt.load_res(combined_anns)\n \n \n def viz(self, img_ids=[], show_bbox=False, show_bbox_only=True, shuffle=False, thresh=0.75, resize=True):\n self.__check_evaluated()\n\n if self.iou_type == 'bbox' and not show_bbox:\n print('[WRN]: IoU type bbox but not showing bbox!')\n\n if show_bbox_only and not show_bbox:\n print('[WRN]: Not showing anything!')\n\n if len(img_ids) == 0:\n img_ids = self.cocoGt.getImgIds()\n\n if shuffle:\n random.shuffle(img_ids)\n else:\n img_ids = sorted(img_ids)\n\n for img_id in img_ids:\n img = imread(self.cocoGt._get_abs_name(img_id))\n\n _match = self.MATCH[img_id]\n\n _matched_gt = [_['gt_id'] for _ in _match]\n _matched_dt = [_['dt_id'] for _ in _match]\n _matched_score = [_['oks'] for _ in _match]\n\n for _gi, _di, _s in zip(_matched_gt, _matched_dt, _matched_score):\n gt = self.cocoGt.loadAnns([_gi])[0]\n dt = self.cocoDt.loadAnns([_di])[0]\n\n if _s >= thresh:\n img = self.cocoGt.viz_anns(img, [gt], color=BLUE, show_bbox=show_bbox, show_bbox_only=show_bbox_only)\n img = self.cocoDt.viz_anns(img, [dt], color=YELLOW, show_bbox=show_bbox, show_bbox_only=show_bbox_only)\n else:\n img = self.cocoGt.viz_anns(img, [gt], color=GREEN, show_bbox=show_bbox, show_bbox_only=show_bbox_only)\n img = self.cocoDt.viz_anns(img, [dt], color=RED, show_bbox=show_bbox, show_bbox_only=show_bbox_only)\n\n for gt in self.cocoGt.loadAnns(self.cocoGt.getAnnIds([img_id])):\n if gt['id'] in _matched_gt:\n continue\n\n img = self.cocoGt.viz_anns(img, [gt], color=GREEN, show_bbox=show_bbox, show_bbox_only=show_bbox_only)\n \n for dt in self.cocoDt.loadAnns(self.cocoDt.getAnnIds([img_id])):\n if dt['id'] in _matched_dt:\n continue\n\n img = self.cocoDt.viz_anns(img, [dt], color=RED, show_bbox=show_bbox, show_bbox_only=show_bbox_only)\n\n if resize:\n scale = np.sqrt(img.shape[0] * img.shape[1] / 960 / 810)\n img = cv2.resize(img, (int(img.shape[1]/scale), int(img.shape[0]/scale)))\n\n print('Image id: ' + img_id)\n cv2.imshow('_', img)\n if cv2.waitKey(0) == 27: break\n \n def oks(self, gt, dt, method='keypoints'):\n '''\n calculate IOU/OKS between specific ground truth and detection\n '''\n if method == 'bbox':\n return iou(gt['bbox'], dt['bbox'], xywh=True)\n elif method == 'keypoints':\n var = (self.params.kpt_oks_sigmas * 2) ** 2\n\n g = np.array(gt['keypoints'])\n xg = 
g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n\n x, y, w, h = gt['bbox']\n x0 = x-w; x1 = x+w*2\n y0 = y-h; y1 = y+w*2\n\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1 > 0:\n dx = xd - xg; dy = yd - yg\n else:\n z = np.zeros((len(self.params.kpt_oks_sigmas)))\n dx = np.max((z, x0-xd), axis=0)+np.max((z, xd-x1), axis=0)\n dy = np.max((z, y0-yd), axis=0)+np.max((z, yd-y1), axis=0)\n\n e = (dx**2 + dy**2) / var / (gt['area'] + np.spacing(1)) / 2\n \n if k1 > 0:\n e = e[vg > 0]\n\n _oks = np.mean([np.exp(- i) if i <= 10 else 0 for i in e])\n\n return _oks\n else:\n raise NotImplementedError(\"IOU method not implemented!\")\n\n def metric_by_frame(self, gts, dts, method='keypoints'):\n \"\"\"\n return matched ground truth and detects with only scores exceeding 0.5 OKS\n \"\"\"\n assert set([_['image_id'] for _ in gts]) == set([_['image_id'] for _ in dts]), 'gt and dt from different image!'\n assert len(gts), \"No ground truth!\"\n assert len(dts), \"No detections!\"\n\n metric = - np.ones((len(gts), len(dts)))\n\n for i in range(len(gts)):\n for j in range(len(dts)):\n metric[i, j] = self.oks(gts[i], dts[j], method)\n\n\n gt_idx, dt_idx = KM(metric, maximize=True)\n match = []\n\n for i, j in zip(gt_idx, dt_idx):\n if metric[i, j] >= 0.5:\n match.append({\n 'gt_id': gts[i]['id'],\n 'dt_id': dts[j]['id'],\n 'oks': metric[i, j]\n })\n\n return match\n\n def __str__(self):\n self.summarize()"
] |
[
[
"numpy.sqrt",
"numpy.cumsum",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.searchsorted",
"scipy.optimize.linear_sum_assignment",
"numpy.exp",
"numpy.where",
"numpy.unique",
"numpy.arange",
"numpy.count_nonzero",
"numpy.repeat",
"numpy.zeros",
"numpy.logical_not",
"numpy.spacing",
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"numpy.ones"
]
] |
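The matching step in hieval.py's metric_by_frame builds a gt-by-dt OKS (or IoU) matrix and solves the assignment with scipy.optimize.linear_sum_assignment (imported as KM above), keeping only pairs that score at least 0.5. Below is a self-contained, hedged sketch of that pattern; the matrix values are made up for illustration. (Separately, the recorded viz() method calls random.shuffle without a top-level import random, so that code path needs the import added before it will run.)

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy OKS matrix: rows = ground-truth annotations, columns = detections.
oks = np.array([
    [0.92, 0.10, 0.05],
    [0.15, 0.81, 0.30],
    [0.02, 0.40, 0.33],
])

# Maximize total OKS over one-to-one gt/dt pairs (Hungarian algorithm).
gt_idx, dt_idx = linear_sum_assignment(oks, maximize=True)

# Keep only matches that clear the 0.5 threshold, as metric_by_frame does.
matches = [
    {"gt": int(i), "dt": int(j), "oks": float(oks[i, j])}
    for i, j in zip(gt_idx, dt_idx)
    if oks[i, j] >= 0.5
]
print(matches)  # [{'gt': 0, 'dt': 0, 'oks': 0.92}, {'gt': 1, 'dt': 1, 'oks': 0.81}]
```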
apache8080/adl_classification
|
[
"8d4bab29998d71e783c4a27a79af33bc153e7fa1"
] |
[
"cluster.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_digits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nfrom scipy.spatial.distance import cdist\nimport matplotlib.pyplot as plt\nimport os\n\nclass cluster():\n def __init__(self, sklearn_load_ds, targets, random):\n data = sklearn_load_ds\n X = pd.DataFrame(data)\n self.X_data = sklearn_load_ds\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, targets, test_size=0.3, random_state = random)\n self.random = random\n \n def classify(self, model):\n model.fit(self.X_train, self.y_train)\n y_pred = model.predict(self.X_test)\n accuracy = accuracy_score(self.y_test, y_pred)\n return accuracy, confusion_matrix(self.y_test, y_pred)\n #print('Accuracy: {}'.format(accuracy_score(self.y_test, y_pred)))\n #print(confusion_matrix(self.y_test, y_pred))\n\n def Kmeans(self, output='add', tune=1, k=1):\n n_clusters = int(k*tune)\n clf = KMeans(n_clusters = n_clusters, random_state = self.random, n_jobs=2)\n clf.fit(self.X_train)\n y_labels_train = clf.labels_\n y_labels_test = clf.predict(self.X_test)\n if output == 'a':\n self.X_train['km_clust'] = y_labels_train\n self.X_test['km_clust'] = y_labels_test\n elif output == 'r':\n self.X_train = y_labels_train[:, np.newaxis]\n self.X_test = y_labels_test[:, np.newaxis]\n return self\n\n #def find_k(self, max_k = 50):\n # for i in range(max_k):\n \n def Kmeans_test(self, k = 1):\n n_clusters = k\n clf = KMeans(n_clusters = n_clusters, random_state = self.random, n_jobs = 2)\n clf.fit(self.X_train)\n return sum(np.min(cdist(self.X_train, clf.cluster_centers_, 'euclidean'), axis = 1))\n \n#32x3 data setup\ndef parse_data():\n frames = []\n files = {\"Brush_teeth\":[], \"Climb_stairs\":[], \"Comb_hair\":[], \"Descend_stairs\":[], \"Drink_glass\":[], \"Eat_meat\":[], \"Eat_soup\":[], \"Getup_bed\":[], \"Liedown_bed\":[], \"Pour_water\":[], \"Sitdown_chair\":[], \"Standup_chair\":[], \"Use_telephone\":[], \"Walk\":[]}\n for k, v in files.items():\n dir_name = \"HMP_Dataset/\" + k\n files[k] = os.listdir(dir_name)\n\n dfs = {}\n sizes = []\n for k, v in files.items():\n dir_name = \"HMP_Dataset/\"+k\n frames = []\n for f in v:\n fname = dir_name + \"/\" + f\n vals = pd.read_table(fname, sep='\\s+').values\n rows = vals.shape[0]\n chunks = int(rows/32)\n chunk = []\n for i in range(chunks):\n for j in range(32):\n index = i + j\n val = vals[index]\n for x in range(3):\n chunk.append(val[x])\n frames.append(chunk)\n chunk = []\n dfs[k] = np.matrix(frames)\n sizes.append(dfs[k].shape[0])\n \n matrices = []\n for k, v in dfs.items():\n matrices.append(v)\n data = np.concatenate(matrices)\n\n target = []\n n = 0\n for s in sizes:\n for i in range(s):\n target.append(n)\n n += 1\n \n return data, target\n \n# plots the distortion for x k-means so a user can use the elbow method\ndef elbow_test(data, max_k):\n K = range(1, max_k + 1)\n distortions = []\n for i in range(max_k):\n i += 1\n print(i)\n distortion = cluster(data, targets, 50).Kmeans_test(k = i)\n distortions.append(distortion)\n plt.plot(K, distortions, 'bx-')\n plt.xlabel('k')\n plt.ylabel('Distortion')\n plt.title('The Elbow Method showing the optimal k')\n plt.show()\n\n# finds the max k by trying as 1 to max_k k values\n# and finding what made the classifier the most 
accurate\ndef find_max_k_brute_force(data, targets, max_k, out='a'):\n accuracy = 0.0\n confusion_matrix = 0\n final_tuning_val = 0\n for i in range(max_k):\n i += 1\n print(i)\n a, cmat = cluster(data, targets, 50).Kmeans(output=out, k=i).classify(model = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=50))\n if a > accuracy:\n accuracy = a\n confusion_matrix = cmat\n final_tuning_val = i\n print('Final d = {}'.format(96))\n print('Final k = {}'.format(k_const*final_tuning_val))\n print('Final Accuracy: {}'.format(accuracy))\n print('Confusion Matrix: \\n')\n print(confusion_matrix)\n\n# A final run using 14, 20, 644 as the k-values\n# Also runs k-means being added as a feature vs k-means being removed as a feature\n# Finally runs the random forrest without k-means\n# all of these runs are used for final comparison in the short writeup\ndef final_run(d, targets):\n k_vals=[14, 20, 644]\n print(\"K-Means + Random Forest on original data and K-means data as added feature\")\n for i in k_vals:\n #a, cmat = cluster(d, targets, 50).Kmeans(output='r', tune=i, k=k_const).classify(model = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=50))\n a, cmat = cluster(d, targets, 50).Kmeans(output='a', k=i).classify(model = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=50))\n print('Final d = {}'.format(96))\n print('Final k = {}'.format(i))\n print('Final Accuracy: {}'.format(a))\n print('Confusion Matrix: \\n')\n print(cmat)\n\n print(\"\\n K-Means + Random Forest only on K-means data\")\n a, cmat = cluster(d, targets, 50).Kmeans(output='r', k=658).classify(model = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=50))\n print('Final d = {}'.format(96))\n print('Final k = {}'.format(658))\n print('Final Accuracy: {}'.format(a))\n print('Confusion Matrix: \\n')\n print(cmat)\n\n print(\"\\n Only Random Forest Classifier: \")\n a, cmat = cluster(d, targets, 50).classify(model = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=50))\n print('Final d = n/a'.format(96))\n print('Final k = n/a'.format(i))\n print('Final Accuracy: {}'.format(a))\n print('Confusion Matrix: \\n')\n print(cmat)\n\ndef main():\n d, targets = parse_data()\n # elbow_test(d, 50) # graphs the distortions for k = 1 - k = 50 and then shows the graph to use the elbow test\n final_run(d, targets)\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.matrix",
"matplotlib.pyplot.title",
"sklearn.cluster.KMeans",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"scipy.spatial.distance.cdist",
"pandas.read_table",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
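The pipeline in cluster.py hinges on one idea: fit K-means on the training features and feed its cluster assignments to a random forest, either appended as an extra column (output='a') or as the sole feature (output='r'). A hedged sketch of the appended-feature variant on synthetic data follows; the dataset, n_clusters=14, and the forest parameters are illustrative. One caveat when re-running the recorded file: KMeans no longer accepts n_jobs in scikit-learn 1.0+, so that argument has to be dropped.

```python
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=20, n_informative=8, random_state=50)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=50)

# Fit K-means on the training features and append the cluster id as a new column,
# mirroring the output='a' branch of cluster.Kmeans().
km = KMeans(n_clusters=14, random_state=50, n_init=10)
km.fit(X_train)
X_train_aug = np.column_stack([X_train, km.labels_])
X_test_aug = np.column_stack([X_test, km.predict(X_test)])

clf = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=50)
clf.fit(X_train_aug, y_train)
print("accuracy:", accuracy_score(y_test, clf.predict(X_test_aug)))
```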
rowedenny/ULTRA_pytorch
|
[
"4c7bcbe0e3afb9c1abf627b002f90b85c1c7c3ff"
] |
[
"ultra/input_layer/stochastic_online_simulation_feed.py"
] |
[
"\"\"\"Simulate online learning process and click data based on human annotations.\n\nSee the following paper for more information on the simulation data.\n\n * Qingyao Ai, Keping Bi, Cheng Luo, Jiafeng Guo, W. Bruce Croft. 2018. Unbiased Learning to Rank with Unbiased Propensity Estimation. In Proceedings of SIGIR '18\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport json\nimport numpy as np\nimport torch\nfrom ultra.input_layer import BaseInputFeed\nfrom ultra.utils import click_models as cm\nfrom ultra.utils.team_draft_interleave import TeamDraftInterleaving\nimport ultra\n\n\nclass StochasticOnlineSimulationFeed(BaseInputFeed):\n \"\"\"Simulate online learning to rank and click data based on human annotations.\n\n This class implements a input layer for online learning to rank experiments\n by simulating click data based on both the human relevance annotation of\n each query-document pair and a predefined click model.\n \"\"\"\n\n def __init__(self, model, batch_size, hparam_str):\n \"\"\"Create the model.\n\n Args:\n model: (BasicModel) The model we are going to train.\n batch_size: the size of the batches generated in each iteration.\n hparam_str: the hyper-parameters for the input layer.\n session: the current tensorflow Session (used for online learning).\n \"\"\"\n self.hparams = ultra.utils.hparams.HParams(\n # the setting file for the predefined click models.\n click_model_json='./example/ClickModel/pbm_0.1_1.0_4_1.0.json',\n # Scalar for the probability distribution.\n tau=1,\n # Set True to feed relevance labels instead of simulated clicks.\n oracle_mode=False,\n # Set eta change step for dynamic bias severity in training, 0.0\n # means no change.\n dynamic_bias_eta_change=0.0,\n # Set how many steps to change eta for dynamic bias severity in\n # training, 0.0 means no change.\n dynamic_bias_step_interval=1000,\n )\n\n print('Create online stochastic simluation feed')\n print(hparam_str)\n self.hparams.parse(hparam_str)\n self.click_model = None\n with open(self.hparams.click_model_json) as fin:\n model_desc = json.load(fin)\n self.click_model = cm.loadModelFromJson(model_desc)\n self.start_index = 0\n self.count = 1\n self.rank_list_size = model.rank_list_size\n self.max_candidate_num = model.max_candidate_num\n self.feature_size = model.feature_size\n self.batch_size = batch_size\n self.model = model\n self.global_batch_count = 0\n\n # Check whether the model needs result interleaving.\n self.need_interleave = False\n if hasattr(model.hparams, 'need_interleave'):\n self.need_interleave = model.hparams.need_interleave\n print('Online simulation with interleaving: %s' %\n (str(self.need_interleave)))\n if self.need_interleave:\n self.interleaving = TeamDraftInterleaving()\n\n def prepare_true_labels_with_index(\n self, data_set, index, docid_inputs, letor_features, labels, check_validation=False):\n i = index\n # Generate label list.\n label_list = [\n 0 if data_set.initial_list[i][x] < 0 else data_set.labels[i][x] for x in range(\n self.max_candidate_num)]\n\n # Check if data is valid\n if check_validation and sum(label_list) == 0:\n return\n base = len(letor_features)\n for x in range(self.max_candidate_num):\n if data_set.initial_list[i][x] >= 0:\n letor_features.append(\n data_set.features[data_set.initial_list[i][x]])\n docid_inputs.append(list([-1 if data_set.initial_list[i][x]\n < 0 else base + x for x in range(self.max_candidate_num)]))\n labels.append(label_list)\n\n def 
simulate_clicks_online(self, input_feed, check_validation=False):\n \"\"\"Simulate online environment by reranking documents and collect clicks.\n\n Args:\n input_feed: (dict) The input_feed data.\n check_validation: (bool) Set True to ignore data with no positive labels.\n\n Returns:\n input_feed: a feed dictionary for the next step\n info_map: a dictionary contain some basic information about the batch (for debugging).\n\n \"\"\"\n # Compute ranking scores with input_feed\n rank_scores = self.model.validation(input_feed, True)[1]\n if self.model.is_cuda_avail:\n rank_scores = rank_scores.cpu()\n\n # Rerank documents and collect clicks\n letor_features_length = len(input_feed[self.model.letor_features_name])\n local_batch_size = len(input_feed[self.model.docid_inputs_name[0]])\n for i in range(local_batch_size):\n # Get valid doc index\n valid_idx = self.max_candidate_num - 1\n while valid_idx > -1:\n if input_feed[self.model.docid_inputs_name[valid_idx]][i] < letor_features_length: # a valid doc\n break\n valid_idx -= 1\n list_len = valid_idx + 1\n\n def plackett_luce_sampling(score_list):\n # Sample document ranking\n scores = score_list[:list_len]\n scores = scores - max(scores)\n exp_scores = np.exp(self.hparams.tau * scores)\n exp_scores = exp_scores.numpy()\n probs = exp_scores / np.sum(exp_scores)\n re_list = np.random.choice(np.arange(list_len),\n replace=False,\n p=probs,\n size=np.count_nonzero(probs))\n # Append unselected documents to the end\n used_indexs = set(re_list)\n unused_indexs = []\n for tmp_index in range(list_len):\n if tmp_index not in used_indexs:\n unused_indexs.append(tmp_index)\n re_list = np.append(re_list, unused_indexs).astype(int)\n return re_list\n\n rerank_list = plackett_luce_sampling(rank_scores[i])\n\n # Rerank documents\n new_docid_list = np.zeros(list_len)\n new_label_list = np.zeros(list_len)\n for j in range(list_len):\n new_docid_list[j] = input_feed[self.model.docid_inputs_name[rerank_list[j]]][i]\n new_label_list[j] = input_feed[self.model.labels_name[rerank_list[j]]][i]\n # Collect clicks online\n click_list = None\n if self.hparams.oracle_mode:\n click_list = new_label_list[:self.rank_list_size]\n else:\n click_list, _, _ = self.click_model.sampleClicksForOneList(\n new_label_list[:self.rank_list_size])\n sample_count = 0\n while check_validation and sum(\n click_list) == 0 and sample_count < self.MAX_SAMPLE_ROUND_NUM:\n click_list, _, _ = self.click_model.sampleClicksForOneList(\n new_label_list[:self.rank_list_size])\n sample_count += 1\n # update input_feed\n for j in range(list_len):\n input_feed[self.model.docid_inputs_name[j]][i] = new_docid_list[j]\n if j < self.rank_list_size:\n input_feed[self.model.labels_name[j]][i] = click_list[j]\n else:\n input_feed[self.model.labels_name[j]][i] = 0\n return input_feed\n\n def get_batch(self, data_set, check_validation=False, data_format = \"ULTRA\"):\n \"\"\"Get a random batch of data, prepare for step. Typically used for training.\n\n To feed data in step(..) it must be a list of batch-major vectors, while\n data here contains single length-major cases. 
So the main logic of this\n function is to re-index data cases to be in the proper format for feeding.\n\n Args:\n data_set: (Raw_data) The dataset used to build the input layer.\n check_validation: (bool) Set True to ignore data with no positive labels.\n\n Returns:\n input_feed: a feed dictionary for the next step\n info_map: a dictionary contain some basic information about the batch (for debugging).\n\n \"\"\"\n\n if len(data_set.initial_list[0]) < self.rank_list_size:\n raise ValueError(\"Input ranklist length must be no less than the required list size,\"\n \" %d != %d.\" % (len(data_set.initial_list[0]), self.rank_list_size))\n length = len(data_set.initial_list)\n docid_inputs, letor_features, labels = [], [], []\n rank_list_idxs = []\n for _ in range(self.batch_size):\n i = int(random.random() * length)\n rank_list_idxs.append(i)\n self.prepare_true_labels_with_index(data_set, i,\n docid_inputs, letor_features, labels, check_validation)\n local_batch_size = len(docid_inputs)\n letor_features_length = len(letor_features)\n for i in range(local_batch_size):\n for j in range(self.max_candidate_num):\n if docid_inputs[i][j] < 0:\n docid_inputs[i][j] = letor_features_length\n\n batch_docid_inputs = []\n batch_labels = []\n for length_idx in range(self.max_candidate_num):\n # Batch encoder inputs are just re-indexed docid_inputs.\n batch_docid_inputs.append(\n np.array([docid_inputs[batch_idx][length_idx]\n for batch_idx in range(local_batch_size)], dtype=np.float32))\n # Batch decoder inputs are re-indexed decoder_inputs, we create\n # labels.\n batch_labels.append(\n np.array([labels[batch_idx][length_idx]\n for batch_idx in range(local_batch_size)], dtype=np.float32))\n # Create input feed map\n input_feed = {}\n input_feed[self.model.letor_features_name] = np.array(letor_features)\n for l in range(self.max_candidate_num):\n input_feed[self.model.docid_inputs_name[l]] = batch_docid_inputs[l]\n input_feed[self.model.labels_name[l]] = batch_labels[l]\n\n # Simulate online environment and collect clicks.\n input_feed = self.simulate_clicks_online(input_feed, check_validation)\n\n # Create info_map to store other information\n info_map = {\n 'rank_list_idxs': rank_list_idxs,\n 'input_list': docid_inputs,\n 'click_list': labels,\n 'letor_features': letor_features\n }\n\n self.global_batch_count += 1\n if self.hparams.dynamic_bias_eta_change != 0:\n if self.global_batch_count % self.hparams.dynamic_bias_step_interval == 0:\n self.click_model.eta += self.hparams.dynamic_bias_eta_change\n self.click_model.setExamProb(self.click_model.eta)\n print(\n 'Dynamically change bias severity eta to %.3f' %\n self.click_model.eta)\n return input_feed, info_map\n\n def get_next_batch(self, index, data_set, check_validation=False, data_format = \"ULTRA\"):\n \"\"\"Get the next batch of data from a specific index, prepare for step.\n Typically used for validation.\n\n To feed data in step(..) it must be a list of batch-major vectors, while\n data here contains single length-major cases. 
So the main logic of this\n function is to re-index data cases to be in the proper format for feeding.\n\n Args:\n index: the index of the data before which we will use to create the data batch.\n data_set: (Raw_data) The dataset used to build the input layer.\n check_validation: (bool) Set True to ignore data with no positive labels.\n\n Returns:\n input_feed: a feed dictionary for the next step\n info_map: a dictionary contain some basic information about the batch (for debugging).\n\n \"\"\"\n if len(data_set.initial_list[0]) < self.rank_list_size:\n raise ValueError(\"Input ranklist length must be no less than the required list size,\"\n \" %d != %d.\" % (len(data_set.initial_list[0]), self.rank_list_size))\n\n docid_inputs, letor_features, labels = [], [], []\n\n num_remain_data = len(data_set.initial_list) - index\n for offset in range(min(self.batch_size, num_remain_data)):\n i = index + offset\n self.prepare_true_labels_with_index(\n data_set, i, docid_inputs, letor_features, labels, check_validation)\n\n local_batch_size = len(docid_inputs)\n letor_features_length = len(letor_features)\n for i in range(local_batch_size):\n for j in range(self.max_candidate_num):\n if docid_inputs[i][j] < 0:\n docid_inputs[i][j] = letor_features_length\n\n batch_docid_inputs = []\n batch_labels = []\n for length_idx in range(self.max_candidate_num):\n # Batch encoder inputs are just re-indexed docid_inputs.\n batch_docid_inputs.append(\n np.array([docid_inputs[batch_idx][length_idx]\n for batch_idx in range(local_batch_size)], dtype=np.float32))\n # Batch decoder inputs are re-indexed decoder_inputs, we create\n # weights.\n batch_labels.append(\n np.array([labels[batch_idx][length_idx]\n for batch_idx in range(local_batch_size)], dtype=np.float32))\n # Create input feed map\n input_feed = {}\n input_feed[self.model.letor_features.name] = np.array(letor_features)\n for l in range(self.max_candidate_num):\n input_feed[self.model.docid_inputs_name[l]] = batch_docid_inputs[l]\n input_feed[self.model.labels_name[l]] = batch_labels[l]\n\n # Simulate online environment and collect clicks.\n input_feed = self.simulate_clicks_online(input_feed, check_validation)\n\n # Create others_map to store other information\n others_map = {\n 'input_list': docid_inputs,\n 'click_list': labels,\n }\n\n return input_feed, others_map\n\n def get_data_by_index(self, data_set, index, check_validation=False):\n \"\"\"Get one data from the specified index, prepare for step.\n\n Args:\n data_set: (Raw_data) The dataset used to build the input layer.\n index: the index of the data\n check_validation: (bool) Set True to ignore data with no positive labels.\n\n Returns:\n The triple (docid_inputs, decoder_inputs, target_weights) for\n the constructed batch that has the proper format to call step(...) 
later.\n \"\"\"\n if len(data_set.initial_list[0]) < self.rank_list_size:\n raise ValueError(\"Input ranklist length must be no less than the required list size,\"\n \" %d != %d.\" % (len(data_set.initial_list[0]), self.rank_list_size))\n\n docid_inputs, letor_features, labels = [], [], []\n\n i = index\n self.prepare_true_labels_with_index(\n data_set,\n i,\n docid_inputs,\n letor_features,\n labels,\n check_validation)\n\n letor_features_length = len(letor_features)\n for j in range(self.max_candidate_num):\n if docid_inputs[-1][j] < 0:\n docid_inputs[-1][j] = letor_features_length\n\n batch_docid_inputs = []\n batch_labels = []\n for length_idx in range(self.max_candidate_num):\n # Batch encoder inputs are just re-indexed docid_inputs.\n batch_docid_inputs.append(\n np.array([docid_inputs[batch_idx][length_idx]\n for batch_idx in range(1)], dtype=np.float32))\n # Batch decoder inputs are re-indexed decoder_inputs, we create\n # weights.\n batch_labels.append(\n np.array([labels[batch_idx][length_idx]\n for batch_idx in range(1)], dtype=np.float32))\n # Create input feed map\n input_feed = {}\n input_feed[self.model.letor_features.name] = np.array(letor_features)\n for l in range(self.max_candidate_num):\n input_feed[self.model.docid_inputs_name[l]] = batch_docid_inputs[l]\n input_feed[self.model.labels_name[l]] = batch_labels[l]\n\n # Simulate online environment and collect clicks.\n input_feed = self.simulate_clicks_online(input_feed, check_validation)\n\n # Create others_map to store other information\n others_map = {\n 'input_list': docid_inputs,\n 'click_list': labels,\n }\n\n return input_feed, others_map\n"
] |
[
[
"numpy.arange",
"numpy.append",
"numpy.count_nonzero",
"numpy.array",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] |
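plackett_luce_sampling inside simulate_clicks_online turns ranking scores into a temperature-scaled softmax and samples a document ordering without replacement, appending any documents whose probability underflowed to zero. A standalone, hedged sketch of that sampling follows; the score values, tau, and the seed are illustrative.

```python
import numpy as np

def plackett_luce_sample(scores, tau=1.0, seed=0):
    rng = np.random.default_rng(seed)
    scores = np.asarray(scores, dtype=float)
    # Temperature-scaled softmax; subtracting the max keeps exp() from overflowing.
    exp_scores = np.exp(tau * (scores - scores.max()))
    probs = exp_scores / exp_scores.sum()
    # Sample a ranking without replacement; higher-probability documents come earlier on average.
    ranked = rng.choice(len(scores), size=np.count_nonzero(probs), replace=False, p=probs)
    # Documents whose probability rounded to zero go to the end, as in the recorded code.
    leftover = [i for i in range(len(scores)) if i not in set(ranked)]
    return np.append(ranked, leftover).astype(int)

print(plackett_luce_sample([2.1, 0.3, -1.0, 1.4]))  # prints a permutation of [0 1 2 3]
```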
BerkeleyAutomation/rlqp-python
|
[
"55f378e496979bd00e84cea4583ac37bfaa571a9"
] |
[
"module/tests/polishing_test.py"
] |
[
"# Test osqp python module\nimport rlqp as osqp\nfrom rlqp.tests.utils import solve_high_accuracy, rel_tol, abs_tol, decimal_tol\n# import rlqp as osqppurepy as osqp\nimport numpy as np\nfrom scipy import sparse\nimport scipy as sp\n\n# Unit Test\nimport unittest\nimport numpy.testing as nptest\n\n\nclass polish_tests(unittest.TestCase):\n\n def setUp(self):\n \"\"\"\n Setup default options\n \"\"\"\n self.opts = {'verbose': False,\n 'eps_abs': 1e-03,\n 'eps_rel': 1e-03,\n 'scaling': True,\n 'rho': 0.1,\n 'alpha': 1.6,\n 'max_iter': 2500,\n 'polish': True,\n 'polish_refine_iter': 4}\n\n def test_polish_simple(self):\n\n # Simple QP problem\n self.P = sparse.diags([11., 0.], format='csc')\n self.q = np.array([3, 4])\n self.A = sparse.csc_matrix(\n [[-1, 0], [0, -1], [-1, -3], [2, 5], [3, 4]])\n self.u = np.array([0, 0, -15, 100, 80])\n self.l = -1e05 * np.ones(len(self.u))\n self.n = self.P.shape[0]\n self.m = self.A.shape[0]\n self.model = osqp.OSQP()\n self.model.setup(P=self.P, q=self.q, A=self.A, l=self.l, u=self.u,\n **self.opts)\n\n # Solve problem\n res = self.model.solve()\n\n x_sol, y_sol, obj_sol = solve_high_accuracy(self.P, self.q, self.A,\n self.l, self.u)\n # Assert close\n nptest.assert_allclose(res.x, x_sol, rtol=rel_tol, atol=abs_tol)\n nptest.assert_allclose(res.y, y_sol, rtol=rel_tol, atol=abs_tol)\n nptest.assert_almost_equal(\n res.info.obj_val, obj_sol, decimal=decimal_tol)\n\n def test_polish_unconstrained(self):\n\n # Unconstrained QP problem\n sp.random.seed(4)\n\n self.n = 30\n self.m = 0\n P = sparse.diags(np.random.rand(self.n)) + 0.2*sparse.eye(self.n)\n self.P = P.tocsc()\n self.q = np.random.randn(self.n)\n self.A = sparse.csc_matrix((self.m, self.n))\n self.l = np.array([])\n self.u = np.array([])\n self.model = osqp.OSQP()\n self.model.setup(P=self.P, q=self.q, A=self.A, l=self.l, u=self.u,\n **self.opts)\n\n # Solve problem\n res = self.model.solve()\n\n x_sol, _, obj_sol = solve_high_accuracy(self.P, self.q, self.A,\n self.l, self.u)\n # Assert close\n nptest.assert_allclose(res.x, x_sol, rtol=rel_tol, atol=abs_tol)\n nptest.assert_almost_equal(\n res.info.obj_val, obj_sol, decimal=decimal_tol)\n\n def test_polish_random(self):\n\n # Random QP problem\n sp.random.seed(6)\n\n self.n = 30\n self.m = 50\n Pt = sparse.random(self.n, self.n)\n self.P = Pt.T @ Pt\n self.q = np.random.randn(self.n)\n self.A = sparse.csc_matrix(np.random.randn(self.m, self.n))\n self.l = -3 + np.random.randn(self.m)\n self.u = 3 + np.random.randn(self.m)\n self.model = osqp.OSQP()\n self.model.setup(P=self.P, q=self.q, A=self.A, l=self.l, u=self.u,\n **self.opts)\n\n # Solve problem\n res = self.model.solve()\n\n x_sol, y_sol, obj_sol = solve_high_accuracy(self.P, self.q, self.A,\n self.l, self.u)\n # Assert close\n nptest.assert_allclose(res.x, x_sol, rtol=rel_tol, atol=abs_tol)\n nptest.assert_allclose(res.y, y_sol, rtol=rel_tol, atol=abs_tol)\n nptest.assert_almost_equal(\n res.info.obj_val, obj_sol, decimal=decimal_tol)\n"
] |
[
[
"scipy.sparse.csc_matrix",
"scipy.random.seed",
"scipy.sparse.eye",
"scipy.sparse.diags",
"numpy.testing.assert_almost_equal",
"scipy.sparse.random",
"numpy.random.randn",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array"
]
] |
jsikyoon/dreamer-1
|
[
"d26876c5c723182ba3c19c9e13f7523a6b943515"
] |
[
"dreamer/tools/copy_weights.py"
] |
[
"# Copyright 2019 The Dreamer Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom dreamer.tools import filter_variables_lib\n\n\ndef soft_copy_weights(source_pattern, target_pattern, amount):\n if not (0 < amount <= 0.5 or amount == 1):\n message = 'Copy amount should probably be less than half or everything,'\n message += ' not {}'.format(amount)\n raise ValueError(message)\n source_vars = filter_variables_lib.filter_variables(include=source_pattern)\n target_vars = filter_variables_lib.filter_variables(include=target_pattern)\n source_vars = sorted(source_vars, key=lambda x: x.name)\n target_vars = sorted(target_vars, key=lambda x: x.name)\n assert len(source_vars) == len(target_vars)\n updates = []\n for source, target in zip(source_vars, target_vars):\n assert source.name != target.name\n if amount == 1.0:\n updates.append(target.assign(source))\n else:\n updates.append(target.assign((1 - amount) * target + amount * source))\n return tf.group(*updates)\n"
] |
[
[
"tensorflow.group"
]
] |
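soft_copy_weights in copy_weights.py implements a Polyak ("soft") target update, target = (1 - amount) * target + amount * source, with amount == 1 as a hard copy and a sanity check that amount is either small or exactly 1. A framework-agnostic, hedged sketch of the same rule in plain NumPy follows; the array shapes and the 0.1 amount are illustrative.

```python
import numpy as np

def soft_copy(source_vars, target_vars, amount):
    # target <- (1 - amount) * target + amount * source; amount == 1 is a hard copy.
    assert 0 < amount <= 0.5 or amount == 1, "copy amount should be small or exactly 1"
    for src, tgt in zip(source_vars, target_vars):
        if amount == 1:
            tgt[...] = src
        else:
            tgt[...] = (1 - amount) * tgt + amount * src

source = [np.ones((2, 2)), np.full((3,), 5.0)]
target = [np.zeros((2, 2)), np.zeros((3,))]
soft_copy(source, target, amount=0.1)
print(target[0])  # every entry is 0.1 after one soft update
```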
hickmank/keras-vis
|
[
"90ae5565951b5e6a90d706b8205c2c4dfc271505"
] |
[
"vis/utils/utils.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\n\nimport os\nimport tempfile\nimport math\nimport json\nimport six\n\nimport numpy as np\nimport matplotlib.font_manager as fontman\n\nfrom skimage import io, transform\nfrom keras import backend as K\nfrom keras.models import load_model\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntry:\n import PIL as pil\n from PIL import ImageFont\n from PIL import Image\n from PIL import ImageDraw\nexcept ImportError:\n pil = None\n\n\n# Globals\n_CLASS_INDEX = None\n\n\ndef _check_pil():\n if not pil:\n raise ImportError('Failed to import PIL. You must install Pillow')\n\n\ndef _find_font_file(query):\n \"\"\"Utility to find font file.\n \"\"\"\n return list(filter(lambda path: query.lower() in os.path.basename(path).lower(), fontman.findSystemFonts()))\n\n\ndef reverse_enumerate(iterable):\n \"\"\"Enumerate over an iterable in reverse order while retaining proper indexes, without creating any copies.\n \"\"\"\n return zip(reversed(range(len(iterable))), reversed(iterable))\n\n\ndef listify(value):\n \"\"\"Ensures that the value is a list. If it is not a list, it creates a new list with `value` as an item.\n \"\"\"\n if not isinstance(value, list):\n value = [value]\n return value\n\n\ndef add_defaults_to_kwargs(defaults, **kwargs):\n \"\"\"Updates `kwargs` with dict of `defaults`\n\n Args:\n defaults: A dictionary of keys and values\n **kwargs: The kwargs to update.\n\n Returns:\n The updated kwargs.\n \"\"\"\n defaults = dict(defaults)\n defaults.update(kwargs)\n return defaults\n\n\ndef get_identifier(identifier, module_globals, module_name):\n \"\"\"Helper utility to retrieve the callable function associated with a string identifier.\n\n Args:\n identifier: The identifier. Could be a string or function.\n module_globals: The global objects of the module.\n module_name: The module name\n\n Returns:\n The callable associated with the identifier.\n \"\"\"\n if isinstance(identifier, six.string_types):\n fn = module_globals.get(identifier)\n if fn is None:\n raise ValueError('Unknown {}: {}'.format(module_name, identifier))\n return fn\n elif callable(identifier):\n return identifier\n else:\n raise ValueError('Could not interpret identifier')\n\n\ndef apply_modifications(model, custom_objects=None):\n \"\"\"Applies modifications to the model layers to create a new Graph. For example, simply changing\n `model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated\n with modified inbound and outbound tensors because of change in layer building function.\n\n Args:\n model: The `keras.models.Model` instance.\n\n Returns:\n The modified model with changes applied. Does not mutate the original `model`.\n \"\"\"\n # The strategy is to save the modified model and load it back. This is done because setting the activation\n # in a Keras layer doesnt actually change the graph. We have to iterate the entire graph and change the\n # layer inbound and outbound nodes with modified tensors. 
This is doubly complicated in Keras 2.x since\n # multiple inbound and outbound nodes are allowed with the Graph API.\n model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')\n try:\n model.save(model_path)\n return load_model(model_path, custom_objects=custom_objects)\n finally:\n os.remove(model_path)\n\n\ndef random_array(shape, mean=128., std=20.):\n \"\"\"Creates a uniformly distributed random array with the given `mean` and `std`.\n\n Args:\n shape: The desired shape\n mean: The desired mean (Default value = 128)\n std: The desired std (Default value = 20)\n\n Returns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`.\n \"\"\"\n x = np.random.random(shape)\n # normalize around mean=0, std=1\n x = (x - np.mean(x)) / (np.std(x) + K.epsilon())\n # and then around the desired mean/std\n x = (x * std) + mean\n return x\n\n\ndef find_layer_idx(model, layer_name):\n \"\"\"Looks up the layer index corresponding to `layer_name` from `model`.\n\n Args:\n model: The `keras.models.Model` instance.\n layer_name: The name of the layer to lookup.\n\n Returns:\n The layer index if found. Raises an exception otherwise.\n \"\"\"\n layer_idx = None\n for idx, layer in enumerate(model.layers):\n if layer.name == layer_name:\n layer_idx = idx\n break\n\n if layer_idx is None:\n raise ValueError(\"No layer with name '{}' within the model\".format(layer_name))\n return layer_idx\n\n\ndef deprocess_input(input_array, input_range=(0, 255)):\n \"\"\"Utility function to scale the `input_array` to `input_range` throwing away high frequency artifacts.\n\n Args:\n input_array: An N-dim numpy array.\n input_range: Specifies the input range as a `(min, max)` tuple to rescale the `input_array`.\n\n Returns:\n The rescaled `input_array`.\n \"\"\"\n # normalize tensor: center on 0., ensure std is 0.1\n input_array = input_array.copy()\n input_array -= input_array.mean()\n input_array /= (input_array.std() + K.epsilon())\n input_array *= 0.1\n\n # clip to [0, 1]\n input_array += 0.5\n input_array = np.clip(input_array, 0, 1)\n\n # Convert to `input_range`\n return (input_range[1] - input_range[0]) * input_array + input_range[0]\n\n\ndef stitch_images(images, margin=5, cols=5):\n \"\"\"Utility function to stitch images together with a `margin`.\n\n Args:\n images: The array of 2D images to stitch.\n margin: The black border margin size between images (Default value = 5)\n cols: Max number of image cols. 
New row is created when number of images exceed the column size.\n (Default value = 5)\n\n Returns:\n A single numpy image array comprising of input images.\n \"\"\"\n if len(images) == 0:\n return None\n\n h, w, c = images[0].shape\n n_rows = int(math.ceil(len(images) / cols))\n n_cols = min(len(images), cols)\n\n out_w = n_cols * w + (n_cols - 1) * margin\n out_h = n_rows * h + (n_rows - 1) * margin\n stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)\n\n for row in range(n_rows):\n for col in range(n_cols):\n img_idx = row * cols + col\n if img_idx >= len(images):\n break\n\n stitched_images[(h + margin) * row: (h + margin) * row + h,\n (w + margin) * col: (w + margin) * col + w, :] = images[img_idx]\n\n return stitched_images\n\n\ndef get_img_shape(img):\n \"\"\"Returns image shape in a backend agnostic manner.\n\n Args:\n img: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or\n `(image_dims..., channels)` if data_format='channels_last'.\n\n Returns:\n Tuple containing image shape information in `(samples, channels, image_dims...)` order.\n \"\"\"\n if isinstance(img, np.ndarray):\n shape = img.shape\n else:\n shape = K.int_shape(img)\n\n if K.image_data_format() == 'channels_last':\n shape = list(shape)\n shape.insert(1, shape[-1])\n shape = tuple(shape[:-1])\n return shape\n\n\ndef load_img(path, grayscale=False, target_size=None):\n \"\"\"Utility function to load an image from disk.\n\n Args:\n path: The image file path.\n grayscale: True to convert to grayscale image (Default value = False)\n target_size: (w, h) to resize. (Default value = None)\n\n Returns:\n The loaded numpy image.\n \"\"\"\n img = io.imread(path, grayscale)\n if target_size:\n img = transform.resize(img, target_size, preserve_range=True).astype('uint8')\n return img\n\n\ndef lookup_imagenet_labels(indices):\n \"\"\"Utility function to return the image net label for the final `dense` layer output index.\n\n Args:\n indices: Could be a single value or an array of indices whose labels should be looked up.\n\n Returns:\n Image net label corresponding to the image category.\n \"\"\"\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n\n indices = listify(indices)\n return [_CLASS_INDEX[str(idx)][1] for idx in indices]\n\n\ndef draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):\n \"\"\"Draws text over the image. Requires PIL.\n\n Args:\n img: The image to use.\n text: The text string to overlay.\n position: The text (x, y) position. (Default value = (10, 10))\n font: The ttf or open type font to use. (Default value = 'FreeSans.ttf')\n font_size: The text font size. (Default value = 12)\n color: The (r, g, b) values for text color. 
(Default value = (0, 0, 0))\n\n Returns: Image overlayed with text.\n \"\"\"\n _check_pil()\n\n font_files = _find_font_file(font)\n if len(font_files) == 0:\n logger.warn(\"Failed to lookup font '{}', falling back to default\".format(font))\n font = ImageFont.load_default()\n else:\n font = ImageFont.truetype(font_files[0], font_size)\n\n # Don't mutate original image\n img = Image.fromarray(img)\n draw = ImageDraw.Draw(img)\n draw.text(position, text, fill=color, font=font)\n return np.asarray(img)\n\n\ndef bgr2rgb(img):\n \"\"\"Converts an RGB image to BGR and vice versa\n\n Args:\n img: Numpy array in RGB or BGR format\n\n Returns: The converted image format\n \"\"\"\n return img[..., ::-1]\n\n\ndef normalize(array, min_value=0., max_value=1.):\n \"\"\"Normalizes the numpy array to (min_value, max_value)\n\n Args:\n array: The numpy array\n min_value: The min value in normalized array (Default value = 0)\n max_value: The max value in normalized array (Default value = 1)\n\n Returns:\n The array normalized to range between (min_value, max_value)\n \"\"\"\n arr_min = np.min(array)\n arr_max = np.max(array)\n normalized = (array - arr_min) / (arr_max - arr_min + K.epsilon())\n return (max_value - min_value) * normalized + min_value\n\n\nclass _BackendAgnosticImageSlice(object):\n \"\"\"Utility class to make image slicing uniform across various `image_data_format`.\n \"\"\"\n\n def __getitem__(self, item_slice):\n \"\"\"Assuming a slice for shape `(samples, channels, image_dims...)`\n \"\"\"\n if K.image_data_format() == 'channels_first':\n return item_slice\n else:\n # Move channel index to last position.\n item_slice = list(item_slice)\n item_slice.append(item_slice.pop(1))\n return tuple(item_slice)\n\n\n\"\"\"Slice utility to make image slicing uniform across various `image_data_format`.\nExample:\n conv_layer[utils.slicer[:, filter_idx, :, :]] will work for both `channels_first` and `channels_last` image\n data formats even though, in tensorflow, slice should be conv_layer[utils.slicer[:, :, :, filter_idx]]\n\"\"\"\nslicer = _BackendAgnosticImageSlice()\n"
] |
[
[
"numpy.random.random",
"numpy.min",
"numpy.asarray",
"numpy.clip",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.zeros",
"matplotlib.font_manager.findSystemFonts"
]
] |
olive380/deepface
|
[
"630e8e72f591eee63c724cb8cbcbbc66712f93fc"
] |
[
"deepface/basemodels/DeepID.py"
] |
[
"import os\nfrom pathlib import Path\nimport gdown\nimport zipfile\n\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Conv2D, Activation, Input, Add, MaxPooling2D, Flatten, Dense, Dropout\n\n#-------------------------------------\n\ndef loadModel(url = 'https://drive.google.com/uc?id=1uRLtBCTQQAvHJ_KVrdbRJiCKxU8m5q2J'):\n\t\n\tmyInput = Input(shape=(55, 47, 3))\n\t\n\tx = Conv2D(20, (4, 4), name='Conv1', activation='relu', input_shape=(55, 47, 3))(myInput)\n\tx = MaxPooling2D(pool_size=2, strides=2, name='Pool1')(x)\n\tx = Dropout(rate=0.99, name='D1')(x)\n\t\n\tx = Conv2D(40, (3, 3), name='Conv2', activation='relu')(x)\n\tx = MaxPooling2D(pool_size=2, strides=2, name='Pool2')(x)\n\tx = Dropout(rate=0.99, name='D2')(x)\n\t\n\tx = Conv2D(60, (3, 3), name='Conv3', activation='relu')(x)\n\tx = MaxPooling2D(pool_size=2, strides=2, name='Pool3')(x)\n\tx = Dropout(rate=0.99, name='D3')(x)\n\t\n\tx1 = Flatten()(x)\n\tfc11 = Dense(160, name = 'fc11')(x1)\n\t\n\tx2 = Conv2D(80, (2, 2), name='Conv4', activation='relu')(x)\n\tx2 = Flatten()(x2)\n\tfc12 = Dense(160, name = 'fc12')(x2)\n\t\n\ty = Add()([fc11, fc12])\n\ty = Activation('relu', name = 'deepid')(y)\n\t\n\tmodel = Model(inputs=[myInput], outputs=y)\n\n\t#---------------------------------\n\t\n\thome = str(Path.home())\n\t\n\tif os.path.isfile(home+'/.deepface/weights/deepid_keras_weights.h5') != True:\n\t\tprint(\"deepid_keras_weights.h5 will be downloaded...\")\n\t\t\n\t\toutput = home+'/.deepface/weights/deepid_keras_weights.h5'\n\t\tgdown.download(url, output, quiet=False)\n\t\t\n\tmodel.load_weights(home+'/.deepface/weights/deepid_keras_weights.h5')\t\n\t\n\treturn model"
] |
[
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] |
chrysm/scikit-multilearn
|
[
"1e17e3e994bd24924a045ba59f933fd728974630"
] |
[
"skmultilearn/base/problem_transformation.py"
] |
[
"import copy\nimport numpy as np\nfrom .base import MLClassifierBase\nfrom ..utils import get_matrix_in_format, matrix_creation_function_for_format\nfrom scipy.sparse import issparse, csr_matrix\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\n\nclass ProblemTransformationBase(MLClassifierBase):\n \"\"\"Base class providing common functions for multi-label classifiers\n that follow the problem transformation approach.\n\n Problem transformation is the approach in which the\n original multi-label classification problem is transformed into one\n or more single-label problems, which are then solved by single-class\n or multi-class classifiers.\n\n Scikit-multilearn provides a number of such methods:\n\n - :class:`BinaryRelevance` - performs a single-label single-class classification for each label and sums the results :class:`BinaryRelevance`\n - :class:`ClassifierChains` - performs a single-label single-class classification for each label and sums the results :class:`ClassifierChain`\n - :class:`LabelPowerset` - performs a single-label single-class classification for each label and sums the results :class:`LabelPowerset`\n\n Parameters\n ----------\n classifier : scikit classifier type\n The base classifier that will be used in a class, will be automagically put under self.classifier for future access.\n require_dense : boolean (default is False)\n Whether the base classifier requires input as dense arrays.\n \"\"\"\n\n def __init__(self, classifier=None, require_dense=None):\n\n super(ProblemTransformationBase, self).__init__()\n\n self.copyable_attrs = [\"classifier\", \"require_dense\"]\n\n self.classifier = classifier\n if require_dense is not None:\n if isinstance(require_dense, bool):\n self.require_dense = [require_dense, require_dense]\n else:\n assert len(require_dense) == 2 and isinstance(\n require_dense[0], bool) and isinstance(require_dense[1], bool)\n self.require_dense = require_dense\n\n else:\n if isinstance(self.classifier, MLClassifierBase):\n self.require_dense = [False, False]\n else:\n self.require_dense = [True, True]\n\n def ensure_multi_label_from_single_class(self, matrix, matrix_format='csr'):\n \"\"\"Transform single class outputs to a 2D sparse matrix\n \n Parameters\n ----------\n matrix : array-like\n input matrix to be checked\n matrix_format : str (default is csr)\n the matrix format to validate with\n\n Returns\n -------\n scipy.sparse\n a 2-dimensional sparse matrix\n \"\"\"\n is_2d = None\n dim_1 = None\n dim_2 = None\n\n # check if array like of array likes\n if isinstance(matrix, (list, tuple, np.ndarray)):\n if isinstance(matrix[0], (list, tuple, np.ndarray)):\n is_2d = True\n dim_1 = len(matrix)\n dim_2 = len(matrix[0])\n # 1d list or array\n else:\n is_2d = False\n # shape is n_samples of 1 class assignment\n dim_1 = len(matrix)\n dim_2 = 1\n \n # not an array but 2D, probably a matrix\n elif matrix.ndim == 2:\n is_2d = True\n dim_1=matrix.shape[0]\n dim_2=matrix.shape[1]\n\n # what is it? \n else:\n raise ValueError(\"Matrix dimensions too large (>2) or other value error\")\n\n new_matrix = None\n if is_2d:\n if issparse(matrix):\n new_matrix = matrix\n else:\n new_matrix = matrix_creation_function_for_format(matrix_format)(matrix, shape=(dim_1, dim_2))\n else:\n new_matrix = matrix_creation_function_for_format(matrix_format)(matrix).T\n\n assert new_matrix.shape == (dim_1, dim_2)\n return new_matrix\n"
] |
[
[
"scipy.sparse.issparse"
]
] |
chrisl6uie/flower-image-classifier
|
[
"fed986eb2f7ef4a0fba3c1ae9d090d7853967819"
] |
[
"train.py"
] |
[
"import torch\nimport argparse\nfrom torch import nn\nfrom torch import optim\nfrom torchvision import datasets, transforms, models\nfrom torch.utils.data import Dataset, DataLoader\nfrom PIL import Image\n\n'''\n Training Script\n import utils.py and class_functions.py to run correctly\n Calls main() function to run\n Arguments are parsed in the beginning to create variables for convenience\n e.g. for checkpoint and arguments for functions from class_functions.py\n\n Run train.py and continue with predict.py\n\n Contents:\n\n main(): train.py script\n\n get_arguments():\n\n returns:\n args = parser.parse_args() for simplicity of argument parser\n\n'''\n\nimport utils\nimport class_functions\n\narchs = {\n 'alexnet' : 9216,\n 'vgg' : 25088\n}\n\ndef main():\n\n args = get_arguments()\n print(\"Arguments from command : \", args)\n\n data_dir = './flowers'\n arch = args.arch\n path = args.save_dir\n device = 'gpu' if args.gpu else 'cpu'\n hidden_layer = args.hidden_units\n output_size = args.output_size\n learning_rate = args.learning_rate\n epochs = args.epochs\n\n print(\"Initializing 'Train.py'...\")\n\n train_loader, valid_loader, test_loader, train_data = utils.load_data()\n\n model, optimizer, criterion = class_functions.setup_model(arch, hidden_layer, output_size)\n\n class_functions.train_model(model, train_loader, valid_loader, epochs, criterion, optimizer, device)\n\n class_functions.test_check(model, test_loader)\n\n model.class_to_idx = train_data.class_to_idx\n\n checkpoint = {'arch': arch,\n 'hidden_layer': hidden_layer,\n 'learning_rate': learning_rate,\n 'epochs': epochs,\n 'input_size': archs[arch],\n 'output_size': 102,\n 'state_dict': model.state_dict(),\n 'class_to_idx': model.class_to_idx,\n 'optimizer':optimizer,\n 'classifier': model.classifier}\n\n torch.save(checkpoint, path)\n\n print(\"Model Successfully Saved...\")\n\ndef get_arguments():\n\n parser = argparse.ArgumentParser(description='Training Flower Image Classifier')\n parser.add_argument('--gpu', dest=\"gpu\", action=\"store\", default='cpu', type=str, help=\"Use GPU? type 'gpu' or 'cpu'\")\n parser.add_argument('--save_dir', dest=\"save_dir\",action=\"store\", default=\"checkpoint.pth\", type=str, help=\"Path of Saved Model\")\n parser.add_argument('--learning_rate', dest=\"learning_rate\",action=\"store\", type=float, default=0.001, help=\"Learning Rate... Default Value = 0.001\")\n parser.add_argument('--epochs', dest=\"epochs\", action=\"store\", type=int, default=5, help=\"Number of Epochs... Default Value = 10\")\n parser.add_argument('--arch', dest=\"arch\", action=\"store\", default=\"alexnet\", type = str, help=\"Model Architecture... [alexnet, vgg16]... Default = 'alexnet'\")\n parser.add_argument('--hidden_units', dest=\"hidden_units\", action=\"store\", default=100, type=int, help=\"Units in Hidden Layer... Default Value = 100\")\n parser.add_argument('--output_size', dest=\"output_size\", action=\"store\", default=102, type=int, help=\"Output Size, Recommended: Do not Change\")\n parser.add_argument('data_dir', action=\"store\", default=\"./flowers\", type=str, help=\"Finding Data Directory\")\n\n args = parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n main()"
] |
[
[
"torch.save"
]
] |
Macleodsolutions/glTF-Blender-IO
|
[
"c2188bab7f68a2597109e0e4ae2d9bd5d07f0c6d"
] |
[
"addons/io_scene_gltf2/blender/exp/gltf2_blender_extract.py"
] |
[
"# Copyright 2018-2021 The glTF-Blender-IO authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom mathutils import Vector\n\nfrom . import gltf2_blender_export_keys\nfrom ...io.com.gltf2_io_debug import print_console\nfrom io_scene_gltf2.blender.exp import gltf2_blender_gather_skins\n\n\ndef extract_primitives(blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings):\n \"\"\"Extract primitives from a mesh.\"\"\"\n print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)\n\n blender_object = None\n if uuid_for_skined_data:\n blender_object = export_settings['vtree'].nodes[uuid_for_skined_data].blender_object\n\n use_normals = export_settings[gltf2_blender_export_keys.NORMALS]\n if use_normals:\n blender_mesh.calc_normals_split()\n\n use_tangents = False\n if use_normals and export_settings[gltf2_blender_export_keys.TANGENTS]:\n if blender_mesh.uv_layers.active and len(blender_mesh.uv_layers) > 0:\n try:\n blender_mesh.calc_tangents()\n use_tangents = True\n except Exception:\n print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')\n\n tex_coord_max = 0\n if export_settings[gltf2_blender_export_keys.TEX_COORDS]:\n if blender_mesh.uv_layers.active:\n tex_coord_max = len(blender_mesh.uv_layers)\n\n color_max = 0\n if export_settings[gltf2_blender_export_keys.COLORS]:\n color_max = len(blender_mesh.vertex_colors)\n\n armature = None\n skin = None\n if blender_vertex_groups and export_settings[gltf2_blender_export_keys.SKINS]:\n if modifiers is not None:\n modifiers_dict = {m.type: m for m in modifiers}\n if \"ARMATURE\" in modifiers_dict:\n modifier = modifiers_dict[\"ARMATURE\"]\n armature = modifier.object\n\n # Skin must be ignored if the object is parented to a bone of the armature\n # (This creates an infinite recursive error)\n # So ignoring skin in that case\n is_child_of_arma = (\n armature and\n blender_object and\n blender_object.parent_type == \"BONE\" and\n blender_object.parent.name == armature.name\n )\n if is_child_of_arma:\n armature = None\n\n if armature:\n skin = gltf2_blender_gather_skins.gather_skin(export_settings['vtree'].nodes[uuid_for_skined_data].armature, export_settings)\n if not skin:\n armature = None\n\n use_morph_normals = use_normals and export_settings[gltf2_blender_export_keys.MORPH_NORMAL]\n use_morph_tangents = use_morph_normals and use_tangents and export_settings[gltf2_blender_export_keys.MORPH_TANGENT]\n\n key_blocks = []\n if blender_mesh.shape_keys and export_settings[gltf2_blender_export_keys.MORPH]:\n key_blocks = [\n key_block\n for key_block in blender_mesh.shape_keys.key_blocks\n if not (key_block == key_block.relative_key or key_block.mute)\n ]\n\n use_materials = export_settings[gltf2_blender_export_keys.MATERIALS]\n\n # Fetch vert positions and bone data (joint,weights)\n\n locs, morph_locs = __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings)\n if skin:\n vert_bones, num_joint_sets, 
need_neutral_bone = __get_bone_data(blender_mesh, skin, blender_vertex_groups)\n if need_neutral_bone is True:\n # Need to create a fake joint at root of armature\n # In order to assign not assigned vertices to it\n # But for now, this is not yet possible, we need to wait the armature node is created\n # Just store this, to be used later\n armature_uuid = export_settings['vtree'].nodes[uuid_for_skined_data].armature\n export_settings['vtree'].nodes[armature_uuid].need_neutral_bone = True\n\n # In Blender there is both per-vert data, like position, and also per-loop\n # (loop=corner-of-poly) data, like normals or UVs. glTF only has per-vert\n # data, so we need to split Blender verts up into potentially-multiple glTF\n # verts.\n #\n # First, we'll collect a \"dot\" for every loop: a struct that stores all the\n # attributes at that loop, namely the vertex index (which determines all\n # per-vert data), and all the per-loop data like UVs, etc.\n #\n # Each unique dot will become one unique glTF vert.\n\n # List all fields the dot struct needs.\n dot_fields = [('vertex_index', np.uint32)]\n if use_normals:\n dot_fields += [('nx', np.float32), ('ny', np.float32), ('nz', np.float32)]\n if use_tangents:\n dot_fields += [('tx', np.float32), ('ty', np.float32), ('tz', np.float32), ('tw', np.float32)]\n for uv_i in range(tex_coord_max):\n dot_fields += [('uv%dx' % uv_i, np.float32), ('uv%dy' % uv_i, np.float32)]\n for col_i in range(color_max):\n dot_fields += [\n ('color%dr' % col_i, np.float32),\n ('color%dg' % col_i, np.float32),\n ('color%db' % col_i, np.float32),\n ('color%da' % col_i, np.float32),\n ]\n if use_morph_normals:\n for morph_i, _ in enumerate(key_blocks):\n dot_fields += [\n ('morph%dnx' % morph_i, np.float32),\n ('morph%dny' % morph_i, np.float32),\n ('morph%dnz' % morph_i, np.float32),\n ]\n\n dots = np.empty(len(blender_mesh.loops), dtype=np.dtype(dot_fields))\n\n vidxs = np.empty(len(blender_mesh.loops))\n blender_mesh.loops.foreach_get('vertex_index', vidxs)\n dots['vertex_index'] = vidxs\n del vidxs\n\n if use_normals:\n kbs = key_blocks if use_morph_normals else []\n normals, morph_normals = __get_normals(\n blender_mesh, kbs, armature, blender_object, export_settings\n )\n dots['nx'] = normals[:, 0]\n dots['ny'] = normals[:, 1]\n dots['nz'] = normals[:, 2]\n del normals\n for morph_i, ns in enumerate(morph_normals):\n dots['morph%dnx' % morph_i] = ns[:, 0]\n dots['morph%dny' % morph_i] = ns[:, 1]\n dots['morph%dnz' % morph_i] = ns[:, 2]\n del morph_normals\n\n if use_tangents:\n tangents = __get_tangents(blender_mesh, armature, blender_object, export_settings)\n dots['tx'] = tangents[:, 0]\n dots['ty'] = tangents[:, 1]\n dots['tz'] = tangents[:, 2]\n del tangents\n signs = __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings)\n dots['tw'] = signs\n del signs\n\n for uv_i in range(tex_coord_max):\n uvs = __get_uvs(blender_mesh, uv_i)\n dots['uv%dx' % uv_i] = uvs[:, 0]\n dots['uv%dy' % uv_i] = uvs[:, 1]\n del uvs\n\n for col_i in range(color_max):\n colors = __get_colors(blender_mesh, col_i)\n dots['color%dr' % col_i] = colors[:, 0]\n dots['color%dg' % col_i] = colors[:, 1]\n dots['color%db' % col_i] = colors[:, 2]\n dots['color%da' % col_i] = colors[:, 3]\n del colors\n\n # Calculate triangles and sort them into primitives.\n\n blender_mesh.calc_loop_triangles()\n loop_indices = np.empty(len(blender_mesh.loop_triangles) * 3, dtype=np.uint32)\n blender_mesh.loop_triangles.foreach_get('loops', loop_indices)\n\n prim_indices = {} # maps material index 
to TRIANGLES-style indices into dots\n\n if use_materials == \"NONE\": # Only for None. For placeholder and export, keep primitives\n # Put all vertices into one primitive\n prim_indices[-1] = loop_indices\n\n else:\n # Bucket by material index.\n\n tri_material_idxs = np.empty(len(blender_mesh.loop_triangles), dtype=np.uint32)\n blender_mesh.loop_triangles.foreach_get('material_index', tri_material_idxs)\n loop_material_idxs = np.repeat(tri_material_idxs, 3) # material index for every loop\n unique_material_idxs = np.unique(tri_material_idxs)\n del tri_material_idxs\n\n for material_idx in unique_material_idxs:\n prim_indices[material_idx] = loop_indices[loop_material_idxs == material_idx]\n\n # Create all the primitives.\n\n primitives = []\n\n for material_idx, dot_indices in prim_indices.items():\n # Extract just dots used by this primitive, deduplicate them, and\n # calculate indices into this deduplicated list.\n prim_dots = dots[dot_indices]\n prim_dots, indices = np.unique(prim_dots, return_inverse=True)\n\n if len(prim_dots) == 0:\n continue\n\n # Now just move all the data for prim_dots into attribute arrays\n\n attributes = {}\n\n blender_idxs = prim_dots['vertex_index']\n\n attributes['POSITION'] = locs[blender_idxs]\n\n for morph_i, vs in enumerate(morph_locs):\n attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]\n\n if use_normals:\n normals = np.empty((len(prim_dots), 3), dtype=np.float32)\n normals[:, 0] = prim_dots['nx']\n normals[:, 1] = prim_dots['ny']\n normals[:, 2] = prim_dots['nz']\n attributes['NORMAL'] = normals\n\n if use_tangents:\n tangents = np.empty((len(prim_dots), 4), dtype=np.float32)\n tangents[:, 0] = prim_dots['tx']\n tangents[:, 1] = prim_dots['ty']\n tangents[:, 2] = prim_dots['tz']\n tangents[:, 3] = prim_dots['tw']\n attributes['TANGENT'] = tangents\n\n if use_morph_normals:\n for morph_i, _ in enumerate(key_blocks):\n ns = np.empty((len(prim_dots), 3), dtype=np.float32)\n ns[:, 0] = prim_dots['morph%dnx' % morph_i]\n ns[:, 1] = prim_dots['morph%dny' % morph_i]\n ns[:, 2] = prim_dots['morph%dnz' % morph_i]\n attributes['MORPH_NORMAL_%d' % morph_i] = ns\n\n if use_morph_tangents:\n attributes['MORPH_TANGENT_%d' % morph_i] = __calc_morph_tangents(normals, ns, tangents)\n\n for tex_coord_i in range(tex_coord_max):\n uvs = np.empty((len(prim_dots), 2), dtype=np.float32)\n uvs[:, 0] = prim_dots['uv%dx' % tex_coord_i]\n uvs[:, 1] = prim_dots['uv%dy' % tex_coord_i]\n attributes['TEXCOORD_%d' % tex_coord_i] = uvs\n\n for color_i in range(color_max):\n colors = np.empty((len(prim_dots), 4), dtype=np.float32)\n colors[:, 0] = prim_dots['color%dr' % color_i]\n colors[:, 1] = prim_dots['color%dg' % color_i]\n colors[:, 2] = prim_dots['color%db' % color_i]\n colors[:, 3] = prim_dots['color%da' % color_i]\n attributes['COLOR_%d' % color_i] = colors\n\n if skin:\n joints = [[] for _ in range(num_joint_sets)]\n weights = [[] for _ in range(num_joint_sets)]\n\n for vi in blender_idxs:\n bones = vert_bones[vi]\n for j in range(0, 4 * num_joint_sets):\n if j < len(bones):\n joint, weight = bones[j]\n else:\n joint, weight = 0, 0.0\n joints[j//4].append(joint)\n weights[j//4].append(weight)\n\n for i, (js, ws) in enumerate(zip(joints, weights)):\n attributes['JOINTS_%d' % i] = js\n attributes['WEIGHTS_%d' % i] = ws\n\n primitives.append({\n 'attributes': attributes,\n 'indices': indices,\n 'material': material_idx,\n })\n\n if export_settings['gltf_loose_edges']:\n # Find loose edges\n loose_edges = [e for e in blender_mesh.edges if e.is_loose]\n blender_idxs 
= [vi for e in loose_edges for vi in e.vertices]\n\n if blender_idxs:\n # Export one glTF vert per unique Blender vert in a loose edge\n blender_idxs = np.array(blender_idxs, dtype=np.uint32)\n blender_idxs, indices = np.unique(blender_idxs, return_inverse=True)\n\n attributes = {}\n\n attributes['POSITION'] = locs[blender_idxs]\n\n for morph_i, vs in enumerate(morph_locs):\n attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]\n\n if skin:\n joints = [[] for _ in range(num_joint_sets)]\n weights = [[] for _ in range(num_joint_sets)]\n\n for vi in blender_idxs:\n bones = vert_bones[vi]\n for j in range(0, 4 * num_joint_sets):\n if j < len(bones):\n joint, weight = bones[j]\n else:\n joint, weight = 0, 0.0\n joints[j//4].append(joint)\n weights[j//4].append(weight)\n\n for i, (js, ws) in enumerate(zip(joints, weights)):\n attributes['JOINTS_%d' % i] = js\n attributes['WEIGHTS_%d' % i] = ws\n\n primitives.append({\n 'attributes': attributes,\n 'indices': indices,\n 'mode': 1, # LINES\n 'material': 0,\n })\n\n if export_settings['gltf_loose_points']:\n # Find loose points\n verts_in_edge = set(vi for e in blender_mesh.edges for vi in e.vertices)\n blender_idxs = [\n vi for vi, _ in enumerate(blender_mesh.vertices)\n if vi not in verts_in_edge\n ]\n\n if blender_idxs:\n blender_idxs = np.array(blender_idxs, dtype=np.uint32)\n\n attributes = {}\n\n attributes['POSITION'] = locs[blender_idxs]\n\n for morph_i, vs in enumerate(morph_locs):\n attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]\n\n if skin:\n joints = [[] for _ in range(num_joint_sets)]\n weights = [[] for _ in range(num_joint_sets)]\n\n for vi in blender_idxs:\n bones = vert_bones[vi]\n for j in range(0, 4 * num_joint_sets):\n if j < len(bones):\n joint, weight = bones[j]\n else:\n joint, weight = 0, 0.0\n joints[j//4].append(joint)\n weights[j//4].append(weight)\n\n for i, (js, ws) in enumerate(zip(joints, weights)):\n attributes['JOINTS_%d' % i] = js\n attributes['WEIGHTS_%d' % i] = ws\n\n primitives.append({\n 'attributes': attributes,\n 'mode': 0, # POINTS\n 'material': 0,\n })\n\n print_console('INFO', 'Primitives created: %d' % len(primitives))\n\n return primitives\n\n\ndef __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings):\n locs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)\n source = key_blocks[0].relative_key.data if key_blocks else blender_mesh.vertices\n source.foreach_get('co', locs)\n locs = locs.reshape(len(blender_mesh.vertices), 3)\n\n morph_locs = []\n for key_block in key_blocks:\n vs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)\n key_block.data.foreach_get('co', vs)\n vs = vs.reshape(len(blender_mesh.vertices), 3)\n morph_locs.append(vs)\n\n # Transform for skinning\n if armature and blender_object:\n apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world\n loc_transform = armature.matrix_world @ apply_matrix\n\n loc_transform = blender_object.matrix_world\n locs[:] = __apply_mat_to_all(loc_transform, locs)\n for vs in morph_locs:\n vs[:] = __apply_mat_to_all(loc_transform, vs)\n\n # glTF stores deltas in morph targets\n for vs in morph_locs:\n vs -= locs\n\n if export_settings[gltf2_blender_export_keys.YUP]:\n __zup2yup(locs)\n for vs in morph_locs:\n __zup2yup(vs)\n\n return locs, morph_locs\n\n\ndef __get_normals(blender_mesh, key_blocks, armature, blender_object, export_settings):\n \"\"\"Get normal for each loop.\"\"\"\n if key_blocks:\n normals = key_blocks[0].relative_key.normals_split_get()\n 
normals = np.array(normals, dtype=np.float32)\n else:\n normals = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)\n blender_mesh.calc_normals_split()\n blender_mesh.loops.foreach_get('normal', normals)\n\n normals = normals.reshape(len(blender_mesh.loops), 3)\n\n morph_normals = []\n for key_block in key_blocks:\n ns = np.array(key_block.normals_split_get(), dtype=np.float32)\n ns = ns.reshape(len(blender_mesh.loops), 3)\n morph_normals.append(ns)\n\n # Transform for skinning\n if armature and blender_object:\n apply_matrix = (armature.matrix_world.inverted_safe() @ blender_object.matrix_world)\n apply_matrix = apply_matrix.to_3x3().inverted_safe().transposed()\n normal_transform = armature.matrix_world.to_3x3() @ apply_matrix\n\n normals[:] = __apply_mat_to_all(normal_transform, normals)\n __normalize_vecs(normals)\n for ns in morph_normals:\n ns[:] = __apply_mat_to_all(normal_transform, ns)\n __normalize_vecs(ns)\n\n for ns in [normals, *morph_normals]:\n # Replace zero normals with the unit UP vector.\n # Seems to happen sometimes with degenerate tris?\n is_zero = ~ns.any(axis=1)\n ns[is_zero, 2] = 1\n\n # glTF stores deltas in morph targets\n for ns in morph_normals:\n ns -= normals\n\n if export_settings[gltf2_blender_export_keys.YUP]:\n __zup2yup(normals)\n for ns in morph_normals:\n __zup2yup(ns)\n\n return normals, morph_normals\n\n\ndef __get_tangents(blender_mesh, armature, blender_object, export_settings):\n \"\"\"Get an array of the tangent for each loop.\"\"\"\n tangents = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)\n blender_mesh.loops.foreach_get('tangent', tangents)\n tangents = tangents.reshape(len(blender_mesh.loops), 3)\n\n # Transform for skinning\n if armature and blender_object:\n apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world\n tangent_transform = apply_matrix.to_quaternion().to_matrix()\n tangents = __apply_mat_to_all(tangent_transform, tangents)\n __normalize_vecs(tangents)\n\n if export_settings[gltf2_blender_export_keys.YUP]:\n __zup2yup(tangents)\n\n return tangents\n\n\ndef __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings):\n signs = np.empty(len(blender_mesh.loops), dtype=np.float32)\n blender_mesh.loops.foreach_get('bitangent_sign', signs)\n\n # Transform for skinning\n if armature and blender_object:\n # Bitangent signs should flip when handedness changes\n # TODO: confirm\n apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world\n tangent_transform = apply_matrix.to_quaternion().to_matrix()\n flipped = tangent_transform.determinant() < 0\n if flipped:\n signs *= -1\n\n # No change for Zup -> Yup\n\n return signs\n\n\ndef __calc_morph_tangents(normals, morph_normal_deltas, tangents):\n # TODO: check if this works\n morph_tangent_deltas = np.empty((len(normals), 3), dtype=np.float32)\n\n for i in range(len(normals)):\n n = Vector(normals[i])\n morph_n = n + Vector(morph_normal_deltas[i]) # convert back to non-delta\n t = Vector(tangents[i, :3])\n\n rotation = morph_n.rotation_difference(n)\n\n t_morph = Vector(t)\n t_morph.rotate(rotation)\n morph_tangent_deltas[i] = t_morph - t # back to delta\n\n return morph_tangent_deltas\n\n\ndef __get_uvs(blender_mesh, uv_i):\n layer = blender_mesh.uv_layers[uv_i]\n uvs = np.empty(len(blender_mesh.loops) * 2, dtype=np.float32)\n layer.data.foreach_get('uv', uvs)\n uvs = uvs.reshape(len(blender_mesh.loops), 2)\n\n # Blender UV space -> glTF UV space\n # u,v -> u,1-v\n uvs[:, 1] *= -1\n uvs[:, 1] += 1\n\n return 
uvs\n\n\ndef __get_colors(blender_mesh, color_i):\n layer = blender_mesh.vertex_colors[color_i]\n colors = np.empty(len(blender_mesh.loops) * 4, dtype=np.float32)\n layer.data.foreach_get('color', colors)\n colors = colors.reshape(len(blender_mesh.loops), 4)\n\n # sRGB -> Linear\n rgb = colors[:, :-1]\n not_small = rgb >= 0.04045\n small_result = np.where(rgb < 0.0, 0.0, rgb * (1.0 / 12.92))\n large_result = np.power((rgb + 0.055) * (1.0 / 1.055), 2.4, where=not_small)\n rgb[:] = np.where(not_small, large_result, small_result)\n\n return colors\n\n\ndef __get_bone_data(blender_mesh, skin, blender_vertex_groups):\n\n need_neutral_bone = False\n\n joint_name_to_index = {joint.name: index for index, joint in enumerate(skin.joints)}\n group_to_joint = [joint_name_to_index.get(g.name) for g in blender_vertex_groups]\n\n # List of (joint, weight) pairs for each vert\n vert_bones = []\n max_num_influences = 0\n\n for vertex in blender_mesh.vertices:\n bones = []\n if vertex.groups:\n for group_element in vertex.groups:\n weight = group_element.weight\n if weight <= 0.0:\n continue\n try:\n joint = group_to_joint[group_element.group]\n except Exception:\n continue\n if joint is None:\n continue\n bones.append((joint, weight))\n bones.sort(key=lambda x: x[1], reverse=True)\n if not bones:\n # Is not assign to any bone\n bones = ((len(skin.joints), 1.0),) # Assign to a joint that will be created later\n need_neutral_bone = True\n vert_bones.append(bones)\n if len(bones) > max_num_influences:\n max_num_influences = len(bones)\n\n # How many joint sets do we need? 1 set = 4 influences\n num_joint_sets = (max_num_influences + 3) // 4\n\n return vert_bones, num_joint_sets, need_neutral_bone\n\n\ndef __zup2yup(array):\n # x,y,z -> x,z,-y\n array[:, [1,2]] = array[:, [2,1]] # x,z,y\n array[:, 2] *= -1 # x,z,-y\n\n\ndef __apply_mat_to_all(matrix, vectors):\n \"\"\"Given matrix m and vectors [v1,v2,...], computes [m@v1,m@v2,...]\"\"\"\n # Linear part\n m = matrix.to_3x3() if len(matrix) == 4 else matrix\n res = np.matmul(vectors, np.array(m.transposed()))\n # Translation part\n if len(matrix) == 4:\n res += np.array(matrix.translation)\n return res\n\n\ndef __normalize_vecs(vectors):\n norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n np.divide(vectors, norms, out=vectors, where=norms != 0)\n"
] |
[
[
"numpy.power",
"numpy.unique",
"numpy.linalg.norm",
"numpy.dtype",
"numpy.repeat",
"numpy.array",
"numpy.where",
"numpy.divide"
]
] |
zmskye/unsupervised_captioning
|
[
"e8d03c1346e26e01078dc120fd37b3215c4d2bbd",
"e8d03c1346e26e01078dc120fd37b3215c4d2bbd"
] |
[
"preprocessing/detect_objects.py",
"preprocessing/process_descriptions.py"
] |
[
"\"\"\"Detect objects using a model pretrained on OpenImage.\"\"\"\nimport multiprocessing\nimport os\n\nimport h5py\nimport numpy as np\nfrom PIL import Image\nfrom absl import app\nfrom absl import flags\nfrom tqdm import tqdm\n\nfrom config import TF_MODELS_PATH\n\nflags.DEFINE_string('image_path', None, 'data dir')\n\nflags.DEFINE_integer('num_proc', 1, 'number of process')\n\nflags.DEFINE_integer('num_gpus', 4, 'number of gpus to use')\n\nFLAGS = flags.FLAGS\n\n\ndef load_image_into_numpy_array(image):\n if image.mode != 'RGB':\n image = image.convert('RGB')\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef initializer():\n import tensorflow as tf\n current = multiprocessing.current_process()\n id = current._identity[0] - 1\n os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % (id % FLAGS.num_gpus)\n\n model_name = 'faster_rcnn_inception_resnet_v2_atrous_oid_2018_01_28'\n path_to_ckpt = (TF_MODELS_PATH + '/research/object_detection/' + model_name\n + '/frozen_inference_graph.pb')\n\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(path_to_ckpt, 'rb') as fid:\n od_graph_def.ParseFromString(fid.read())\n tf.import_graph_def(od_graph_def, name='')\n\n global sess, tensor_dict, image_tensor\n with detection_graph.as_default():\n sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(\n allow_growth=True)))\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n\ndef run(i):\n global sess, tensor_dict, image_tensor\n image_path = FLAGS.image_path + '/' + i.strip()\n image = Image.open(image_path)\n image = load_image_into_numpy_array(image)\n image_np_expanded = np.expand_dims(image, axis=0)\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: image_np_expanded})\n return i, output_dict\n\n\ndef main(_):\n pool = multiprocessing.Pool(FLAGS.num_proc, initializer)\n with open('data/coco_train.txt', 'r') as f:\n train_images = list(f)\n with open('data/coco_val.txt', 'r') as f:\n val_images = list(f)\n with open('data/coco_test.txt', 'r') as f:\n test_images = list(f)\n all_images = train_images + val_images + test_images\n with h5py.File('data/object.hdf5', 'w') as f:\n for ret in tqdm(pool.imap_unordered(run, all_images),\n total=len(all_images)):\n name = os.path.splitext(ret[0])[0]\n g = f.create_group(name)\n output_dict = ret[1]\n n = int(output_dict['num_detections'])\n del output_dict['num_detections']\n for k, v in output_dict.items():\n g.create_dataset(k, data=v[0, :n])\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"\"\"\"Convert the descriptions to tfrecords.\"\"\"\nimport pickle as pkl\nimport json\nimport os\nimport random\nimport re\nimport sys\nfrom urllib.request import Request\nfrom urllib.request import urlopen\n\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\nfrom tqdm import tqdm\n\nfrom config import TF_MODELS_PATH\nfrom misc_fn import _int64_feature_list\n\nsys.path.insert(0, TF_MODELS_PATH + '/research/im2txt/im2txt')\nsys.path.append(TF_MODELS_PATH + '/research')\nsys.path.append(TF_MODELS_PATH + '/research/object_detection')\nfrom data.build_mscoco_data import _create_vocab\nfrom inference_utils import vocabulary\nfrom utils import label_map_util\n\ntf.enable_eager_execution()\n\nflags.DEFINE_bool('new_dict', False, 'generate a new dict')\n\nFLAGS = flags.FLAGS\n\n\ndef get_plural(word):\n c = re.compile('Noun</span> <p> \\(.*<i>plural</i> ([^\\)]+)\\)')\n req = Request('https://www.yourdictionary.com/' + word, headers={\n 'User-Agent': 'Magic Browser'})\n f = urlopen(req)\n html = f.read()\n f.close()\n html = html.decode('utf-8')\n plural_word = c.findall(html)\n if plural_word:\n plural_word = plural_word[0]\n plural_word = plural_word.lower()\n elif 'Noun</span> <p> (<i>plural only)' in html:\n plural_word = word\n else:\n plural_word = word\n if word[-1] != 's':\n plural_word += 's'\n return plural_word\n\n\ndef get_open_image_categories():\n path_to_labels = (TF_MODELS_PATH + '/research/object_detection/data/'\n 'oid_bbox_trainable_label_map.pbtxt')\n category_index = label_map_util.create_category_index_from_labelmap(\n path_to_labels,\n use_display_name=True)\n categories = dict([(v['id'], str(v['name'].lower()).split()[-1]) for k, v in\n category_index.items()])\n category_name = list(set(categories.values()))\n category_name.sort()\n plural_file = 'data/plural_words.json'\n if os.path.exists(plural_file):\n with open(plural_file, 'r') as f:\n plural_dict = json.load(f)\n plural_name = [plural_dict[i] for i in category_name]\n else:\n plural_name = []\n for i in tqdm(category_name):\n plural_name.append(get_plural(i))\n with open(plural_file, 'w') as f:\n json.dump(dict(zip(category_name, plural_name)), f)\n return category_name, plural_name, categories\n\n\ndef parse_key_words(caption, dic):\n key_words = dic.intersection(caption)\n return key_words\n\n\ndef sentence_generator():\n category_name, plural_name, categories = get_open_image_categories()\n replace = dict(zip(plural_name, category_name))\n category_set = set(category_name)\n\n with open('data/sentences.pkl', 'r') as f:\n captions = pkl.load(f)\n\n if FLAGS.new_dict:\n _create_vocab(captions)\n with open('data/glove_vocab.pkl', 'r') as f:\n glove = pkl.load(f)\n glove.append('<S>')\n glove.append('</S>')\n glove = set(glove)\n with open(FLAGS.word_counts_output_file, 'r') as f:\n vocab = list(f)\n vocab = [i.strip() for i in vocab]\n vocab = [i.split() for i in vocab]\n vocab = [(i, int(j)) for i, j in vocab if i in glove]\n word_counts = [i for i in vocab if i[0] in category_set or i[1] >= 40]\n words = set([i[0] for i in word_counts])\n for i in category_name:\n if i not in words:\n word_counts.append((i, 0))\n with open(FLAGS.word_counts_output_file, 'w') as f:\n f.write('\\n'.join(['%s %d' % (w, c) for w, c in word_counts]))\n\n vocab = vocabulary.Vocabulary(FLAGS.word_counts_output_file)\n\n all_ids = dict([(k, vocab.word_to_id(v)) for k, v in categories.items()])\n with open('data/all_ids.pkl', 'w') as f:\n pkl.dump(all_ids, f)\n\n context = tf.train.Features()\n 
random.shuffle(captions)\n for c in captions:\n for i, w in enumerate(c):\n if w in replace:\n c[i] = replace[w]\n k = parse_key_words(c, category_set)\n c = [vocab.word_to_id(word) for word in c]\n if c.count(vocab.unk_id) > len(c) * 0.15:\n continue\n k = [vocab.word_to_id(i) for i in k]\n feature_lists = tf.train.FeatureLists(feature_list={\n 'key': _int64_feature_list(k),\n 'sentence': _int64_feature_list(c)\n })\n sequence_example = tf.train.SequenceExample(\n context=context, feature_lists=feature_lists)\n yield sequence_example.SerializeToString()\n\n\ndef main(_):\n ds = tf.data.Dataset.from_generator(sentence_generator,\n output_types=tf.string, output_shapes=())\n tfrec = tf.data.experimental.TFRecordWriter('data/sentence.tfrec')\n tfrec.write(ds)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] |
[
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"tensorflow.GPUOptions",
"tensorflow.get_default_graph",
"tensorflow.GraphDef"
],
[
"tensorflow.enable_eager_execution",
"tensorflow.train.SequenceExample",
"tensorflow.data.experimental.TFRecordWriter",
"tensorflow.train.Features",
"tensorflow.data.Dataset.from_generator"
]
] |
Zilong-Yuan/cdvae
|
[
"c30a148683775ff623748ce9f7797cc25179ec56"
] |
[
"scripts/compute_metrics.py"
] |
[
"from collections import Counter\nimport argparse\nimport os\nimport json\n\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom p_tqdm import p_map\nfrom scipy.stats import wasserstein_distance\n\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.analysis.structure_matcher import StructureMatcher\nfrom matminer.featurizers.site.fingerprint import CrystalNNFingerprint\nfrom matminer.featurizers.composition.composite import ElementProperty\n\nfrom eval_utils import (\n smact_validity, structure_validity, CompScaler, get_fp_pdist,\n load_config, load_data, get_crystals_list, prop_model_eval, compute_cov)\n\nCrystalNNFP = CrystalNNFingerprint.from_preset(\"ops\")\nCompFP = ElementProperty.from_preset('magpie')\n\nPercentiles = {\n 'mp20': np.array([-3.17562208, -2.82196882, -2.52814761]),\n 'carbon': np.array([-154.527093, -154.45865733, -154.44206825]),\n 'perovskite': np.array([0.43924842, 0.61202443, 0.7364607]),\n}\n\nCOV_Cutoffs = {\n 'mp20': {'struc': 0.4, 'comp': 10.},\n 'carbon': {'struc': 0.2, 'comp': 4.},\n 'perovskite': {'struc': 0.2, 'comp': 4},\n}\n\n\nclass Crystal(object):\n\n def __init__(self, crys_array_dict):\n self.frac_coords = crys_array_dict['frac_coords']\n self.atom_types = crys_array_dict['atom_types']\n self.lengths = crys_array_dict['lengths']\n self.angles = crys_array_dict['angles']\n self.dict = crys_array_dict\n\n self.get_structure()\n self.get_composition()\n self.get_validity()\n self.get_fingerprints()\n\n def get_structure(self):\n if min(self.lengths.tolist()) < 0:\n self.constructed = False\n self.invalid_reason = 'non_positive_lattice'\n else:\n try:\n self.structure = Structure(\n lattice=Lattice.from_parameters(\n *(self.lengths.tolist() + self.angles.tolist())),\n species=self.atom_types, coords=self.frac_coords, coords_are_cartesian=False)\n self.constructed = True\n except Exception:\n self.constructed = False\n self.invalid_reason = 'construction_raises_exception'\n if self.structure.volume < 0.1:\n self.constructed = False\n self.invalid_reason = 'unrealistically_small_lattice'\n\n def get_composition(self):\n elem_counter = Counter(self.atom_types)\n composition = [(elem, elem_counter[elem])\n for elem in sorted(elem_counter.keys())]\n elems, counts = list(zip(*composition))\n counts = np.array(counts)\n counts = counts / np.gcd.reduce(counts)\n self.elems = elems\n self.comps = tuple(counts.astype('int').tolist())\n\n def get_validity(self):\n self.comp_valid = smact_validity(self.elems, self.comps)\n if self.constructed:\n self.struct_valid = structure_validity(self.structure)\n else:\n self.struct_valid = False\n self.valid = self.comp_valid and self.struct_valid\n\n def get_fingerprints(self):\n elem_counter = Counter(self.atom_types)\n comp = Composition(elem_counter)\n self.comp_fp = CompFP.featurize(comp)\n try:\n site_fps = [CrystalNNFP.featurize(\n self.structure, i) for i in range(len(self.structure))]\n except Exception:\n # counts crystal as invalid if fingerprint cannot be constructed.\n self.valid = False\n self.comp_fp = None\n self.struct_fp = None\n return\n self.struct_fp = np.array(site_fps).mean(axis=0)\n\n\nclass RecEval(object):\n\n def __init__(self, pred_crys, gt_crys, stol=0.5, angle_tol=10, ltol=0.3):\n assert len(pred_crys) == len(gt_crys)\n self.matcher = StructureMatcher(\n stol=stol, angle_tol=angle_tol, ltol=ltol)\n self.preds = pred_crys\n self.gts = gt_crys\n\n def 
get_match_rate_and_rms(self):\n def process_one(pred, gt, is_valid):\n if not is_valid:\n return None\n try:\n rms_dist = self.matcher.get_rms_dist(\n pred.structure, gt.structure)\n rms_dist = None if rms_dist is None else rms_dist[0]\n return rms_dist\n except Exception:\n return None\n validity = [c.valid for c in self.preds]\n\n rms_dists = []\n for i in tqdm(range(len(self.preds))):\n rms_dists.append(process_one(\n self.preds[i], self.gts[i], validity[i]))\n rms_dists = np.array(rms_dists)\n match_rate = sum(rms_dists != None) / len(self.preds)\n mean_rms_dist = rms_dists[rms_dists != None].mean()\n return {'match_rate': match_rate,\n 'rms_dist': mean_rms_dist}\n\n def get_metrics(self):\n return self.get_match_rate_and_rms()\n\n\nclass GenEval(object):\n\n def __init__(self, pred_crys, gt_crys, n_samples=1000, eval_model_name=None):\n self.crys = pred_crys\n self.gt_crys = gt_crys\n self.n_samples = n_samples\n self.eval_model_name = eval_model_name\n\n valid_crys = [c for c in pred_crys if c.valid]\n if len(valid_crys) >= n_samples:\n sampled_indices = np.random.choice(\n len(valid_crys), n_samples, replace=False)\n self.valid_samples = [valid_crys[i] for i in sampled_indices]\n else:\n raise Exception(\n f'not enough valid crystals in the predicted set: {len(valid_crys)}/{n_samples}')\n\n def get_validity(self):\n comp_valid = np.array([c.comp_valid for c in self.crys]).mean()\n struct_valid = np.array([c.struct_valid for c in self.crys]).mean()\n valid = np.array([c.valid for c in self.crys]).mean()\n return {'comp_valid': comp_valid,\n 'struct_valid': struct_valid,\n 'valid': valid}\n\n def get_comp_diversity(self):\n comp_fps = [c.comp_fp for c in self.valid_samples]\n comp_fps = CompScaler.transform(comp_fps)\n comp_div = get_fp_pdist(comp_fps)\n return {'comp_div': comp_div}\n\n def get_struct_diversity(self):\n return {'struct_div': get_fp_pdist([c.struct_fp for c in self.valid_samples])}\n\n def get_density_wdist(self):\n pred_densities = [c.structure.density for c in self.valid_samples]\n gt_densities = [c.structure.density for c in self.gt_crys]\n wdist_density = wasserstein_distance(pred_densities, gt_densities)\n return {'wdist_density': wdist_density}\n\n def get_num_elem_wdist(self):\n pred_nelems = [len(set(c.structure.species))\n for c in self.valid_samples]\n gt_nelems = [len(set(c.structure.species)) for c in self.gt_crys]\n wdist_num_elems = wasserstein_distance(pred_nelems, gt_nelems)\n return {'wdist_num_elems': wdist_num_elems}\n\n def get_prop_wdist(self):\n if self.eval_model_name is not None:\n pred_props = prop_model_eval(self.eval_model_name, [\n c.dict for c in self.valid_samples])\n gt_props = prop_model_eval(self.eval_model_name, [\n c.dict for c in self.gt_crys])\n wdist_prop = wasserstein_distance(pred_props, gt_props)\n return {'wdist_prop': wdist_prop}\n else:\n return {'wdist_prop': None}\n\n def get_coverage(self):\n cutoff_dict = COV_Cutoffs[self.eval_model_name]\n (cov_metrics_dict, combined_dist_dict) = compute_cov(\n self.crys, self.gt_crys,\n struc_cutoff=cutoff_dict['struc'],\n comp_cutoff=cutoff_dict['comp'])\n return cov_metrics_dict\n\n def get_metrics(self):\n metrics = {}\n metrics.update(self.get_validity())\n metrics.update(self.get_comp_diversity())\n metrics.update(self.get_struct_diversity())\n metrics.update(self.get_density_wdist())\n metrics.update(self.get_num_elem_wdist())\n metrics.update(self.get_prop_wdist())\n print(metrics)\n metrics.update(self.get_coverage())\n return metrics\n\n\nclass OptEval(object):\n\n def 
__init__(self, crys, num_opt=100, eval_model_name=None):\n \"\"\"\n crys is a list of length (<step_opt> * <num_opt>),\n where <num_opt> is the number of different initialization for optimizing crystals,\n and <step_opt> is the number of saved crystals for each intialzation.\n default to minimize the property.\n \"\"\"\n step_opt = int(len(crys) / num_opt)\n self.crys = crys\n self.step_opt = step_opt\n self.num_opt = num_opt\n self.eval_model_name = eval_model_name\n\n def get_success_rate(self):\n valid_indices = np.array([c.valid for c in self.crys])\n valid_indices = valid_indices.reshape(self.step_opt, self.num_opt)\n valid_x, valid_y = valid_indices.nonzero()\n props = np.ones([self.step_opt, self.num_opt]) * np.inf\n valid_crys = [c for c in self.crys if c.valid]\n if len(valid_crys) == 0:\n sr_5, sr_10, sr_15 = 0, 0, 0\n else:\n pred_props = prop_model_eval(self.eval_model_name, [\n c.dict for c in valid_crys])\n percentiles = Percentiles[self.eval_model_name]\n props[valid_x, valid_y] = pred_props\n best_props = props.min(axis=0)\n sr_5 = (best_props <= percentiles[0]).mean()\n sr_10 = (best_props <= percentiles[1]).mean()\n sr_15 = (best_props <= percentiles[2]).mean()\n return {'SR5': sr_5, 'SR10': sr_10, 'SR15': sr_15}\n\n def get_metrics(self):\n return self.get_success_rate()\n\n\ndef get_file_paths(root_path, task, label='', suffix='pt'):\n if args.label == '':\n out_name = f'eval_{task}.{suffix}'\n else:\n out_name = f'eval_{task}_{label}.{suffix}'\n out_name = os.path.join(root_path, out_name)\n return out_name\n\n\ndef get_crystal_array_list(file_path, batch_idx=0):\n data = load_data(file_path)\n crys_array_list = get_crystals_list(\n data['frac_coords'][batch_idx],\n data['atom_types'][batch_idx],\n data['lengths'][batch_idx],\n data['angles'][batch_idx],\n data['num_atoms'][batch_idx])\n\n if 'input_data_batch' in data:\n batch = data['input_data_batch']\n if isinstance(batch, dict):\n true_crystal_array_list = get_crystals_list(\n batch['frac_coords'], batch['atom_types'], batch['lengths'],\n batch['angles'], batch['num_atoms'])\n else:\n true_crystal_array_list = get_crystals_list(\n batch.frac_coords, batch.atom_types, batch.lengths,\n batch.angles, batch.num_atoms)\n else:\n true_crystal_array_list = None\n\n return crys_array_list, true_crystal_array_list\n\n\ndef main(args):\n all_metrics = {}\n\n cfg = load_config(args.root_path)\n eval_model_name = cfg.data.eval_model_name\n\n if 'recon' in args.tasks:\n recon_file_path = get_file_paths(args.root_path, 'recon', args.label)\n crys_array_list, true_crystal_array_list = get_crystal_array_list(\n recon_file_path)\n pred_crys = p_map(lambda x: Crystal(x), crys_array_list)\n gt_crys = p_map(lambda x: Crystal(x), true_crystal_array_list)\n\n rec_evaluator = RecEval(pred_crys, gt_crys)\n recon_metrics = rec_evaluator.get_metrics()\n all_metrics.update(recon_metrics)\n\n if 'gen' in args.tasks:\n gen_file_path = get_file_paths(args.root_path, 'gen', args.label)\n recon_file_path = get_file_paths(args.root_path, 'recon', args.label)\n crys_array_list, _ = get_crystal_array_list(gen_file_path)\n gen_crys = p_map(lambda x: Crystal(x), crys_array_list)\n if 'recon' not in args.tasks:\n _, true_crystal_array_list = get_crystal_array_list(\n recon_file_path)\n gt_crys = p_map(lambda x: Crystal(x), true_crystal_array_list)\n\n gen_evaluator = GenEval(\n gen_crys, gt_crys, eval_model_name=eval_model_name)\n gen_metrics = gen_evaluator.get_metrics()\n all_metrics.update(gen_metrics)\n\n if 'opt' in args.tasks:\n opt_file_path = 
get_file_paths(args.root_path, 'opt', args.label)\n crys_array_list, _ = get_crystal_array_list(opt_file_path)\n opt_crys = p_map(lambda x: Crystal(x), crys_array_list)\n\n opt_evaluator = OptEval(opt_crys, eval_model_name=eval_model_name)\n opt_metrics = opt_evaluator.get_metrics()\n all_metrics.update(opt_metrics)\n\n print(all_metrics)\n\n if args.label == '':\n metrics_out_file = 'eval_metrics.json'\n else:\n metrics_out_file = f'eval_metrics_{args.label}.json'\n metrics_out_file = os.path.join(args.root_path, metrics_out_file)\n\n # only overwrite metrics computed in the new run.\n if Path(metrics_out_file).exists():\n with open(metrics_out_file, 'r') as f:\n written_metrics = json.load(f)\n if isinstance(written_metrics, dict):\n written_metrics.update(all_metrics)\n else:\n with open(metrics_out_file, 'w') as f:\n json.dump(all_metrics, f)\n if isinstance(written_metrics, dict):\n with open(metrics_out_file, 'w') as f:\n json.dump(written_metrics, f)\n else:\n with open(metrics_out_file, 'w') as f:\n json.dump(all_metrics, f)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--root_path', required=True)\n parser.add_argument('--label', default='')\n parser.add_argument('--tasks', nargs='+', default=['recon', 'gen', 'opt'])\n args = parser.parse_args()\n main(args)\n"
] |
[
[
"numpy.gcd.reduce",
"numpy.array",
"scipy.stats.wasserstein_distance",
"numpy.ones"
]
] |
JLuisRojas/reconocimiento-de-voz
|
[
"59282ffd6841f22e514a7055cb4d20ef97181b90"
] |
[
"src/pipeline/ds2_pipeline.py"
] |
[
"import json\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import Adam, RMSprop\nimport matplotlib.pyplot as plt\n\nfrom .pipeline import Pipeline\n#from .. import ctc\nfrom ..ctc import get_loss\nfrom ..metricas import wer\n\n\"\"\"\nSe define el pipeline de entrenamiento para Deep Speech 2, considerando:\n\t- CTC loss\n\"\"\"\n\nclass DS2Pipeline(Pipeline):\n\n\tdef memory(self):\n\t\timport os\n\t\timport psutil\n\t\tpid = os.getpid()\n\t\tpy = psutil.Process(pid)\n\t\tmemoryUse = py.memory_info()[0]/2.**30 # memory use in GB...I think\n\t\tprint('memory use:', memoryUse)\n\n\n\t\"\"\"\n\tConstructorydel pipeline de ds2\n\tArgs:\n\t\tmodel: Keras Model que se va usar para entrenarlos\n\t\tfeatures: objeto de tipo FeatureExtractor para obtener features del dataset\n\t\tvocabulario: vocabulario del dataset\n\t\tdataset_distrib: objeto de tipo DataDistrib que define la distribucion del dataset\n\t\"\"\"\n\tdef __init__(self, model=None, features=None, vocabulario=None, \n\t\t\tdataset_distrib=None, nombre=\"DS2\"):\n\t\tself.nombre = nombre\n\t\tself.model = model\n\t\tself.features = features\n\t\tself.vocabulario = vocabulario\n\t\tself.dataset_distrib = dataset_distrib\n\n\t\tself.logs_path = \"./training_logs/\"\n\n\n\tdef get_config(self):\n\t\treturn {\n\t\t\t\"nombre\": self.nombre,\n\t\t\t\"descripcion\": \"Deep Speech 2 Pipeline\",\n\t\t\t\"features\": self.features.get_config(),\n\t\t\t\"vocabulario\": self.vocabulario.get_config(),\n\t\t\t\"dataset_dsitrib\": self.dataset_distrib.get_config()\n\t\t}\n\n\[email protected]\n\tdef predict(self, x, training=True):\n\t\t\treturn self.model(x, training=training)\n\n\[email protected]\n\tdef loss(self, x, y, training):\n\t\t\ty_ = self.predict(x, training=True)\n\n\t\t\treturn y_, get_loss(y, y_)\n\n\[email protected]\n\tdef grad(self, inputs, targets):\n\t\t\twith tf.GradientTape() as tape:\n\t\t\t\ty_, loss_value = self.loss(inputs, targets, training=True)\n\n\t\t\treturn y_, loss_value, tape.gradient(loss_value, self.model.trainable_variables)\n\t\"\"\"\n\tInicia el entrenamiento del modelo con las descripciones del dataset\n\tArgs:\n\t\ttrain_descrip: objeto de tipo DataDescripcion con la descripcion del\n\t\t\t\t\t dataset de entrenamiento\n\t\ttest_descirp: objeto de tipo DataDescripcion con las descripcion del\n\t\t\t\t\t dataset de pruebas\n\t\tsetup: diccionario con hyperparametros y setup de entrenamiento\n\t\"\"\"\n\tdef fit(self, train_descrip, test_descrip, setup):\n\t\twith open(self.logs_path + self.nombre + \"/\" + \"training.json\", \"w\") as json_file:\n\t\t\tjson.dump({\n\t\t\t\t\t\"setup\": setup,\n\t\t\t\t\t\"train_descrip\": train_descrip.get_config(),\n\t\t\t\t\t\"test_descrip\": test_descrip.get_config()\n\t\t\t\t}, json_file, indent=4)\n\n\t\tprint(\"[INFO] Cargando distribuciones del datasetpipeline\")\n\t\ttrain = self.dataset_distrib(train_descrip)\n\t\ttest = self.dataset_distrib(test_descrip)\n\n\t\t#t_shape = train._tensors[0].get_shape()[1:]\n\n\t\tlr = setup[\"learning_rate\"]\n\t\tbs = setup[\"batch_size\"]\n\t\tepochs = setup[\"epochs\"]\n\t\ti_epoch = setup[\"initial_epoch\"]\n\n\t\toptimizer = Adam(learning_rate=3e-4)\n\t\t#optimizer = RMSprop()\n\n\t\ttrain = train.batch(bs)\n\t\ttest = test.batch(bs)\n\n\t\tself.train(optimizer, train, test, epochs)\n\n\n\tdef train(self, optimizer, train, test, epochs):\n\t\tcolumnas = [\"epoch\", \"train_loss\", \"train_WER\", \"train_EER\", \"test_loss\", \"test_WER\", \"test_EER\"]\n\t\tlogs = { }\n\t\tfor c in 
columnas:\n\t\t\tlogs[c] = []\n\n\t\tfor epoch in range(epochs):\n\t\t\tlogs[\"epoch\"].append(epoch)\n\t\t\tprint(\"[INFO] Iniciando epoch: {}\".format(epoch))\n\t\t\tself.memory()\n\n\t\t\tprint(\"[INFO] Training\")\n\t\t\tepoch_train_loss, epoch_train_wer, epoch_train_eer = self.train_epoch(optimizer, train, epoch)\n\t\t\tprint(\"[INFO] Testing\")\n\t\t\tepoch_test_loss, epoch_test_wer, epoch_test_eer = self.test_epoch(test, epoch)\n\n\t\t\tif not epoch_train_loss == None:\n\t\t\t\ttrain_loss_mean = tf.reduce_mean(epoch_train_loss).numpy()\n\t\t\t\ttrain_wer_mean = tf.reduce_mean(epoch_train_wer).numpy()\n\t\t\t\ttrain_eer_mean = tf.reduce_mean(epoch_train_eer).numpy()\n\n\t\t\t\ttest_loss_mean = tf.reduce_mean(epoch_test_loss).numpy()\n\t\t\t\ttest_wer_mean = tf.reduce_mean(epoch_test_wer).numpy()\n\t\t\t\ttest_eer_mean = tf.reduce_mean(epoch_test_eer).numpy()\n\n\t\t\t\tlogs[\"train_loss\"].append(train_loss_mean)\n\t\t\t\tlogs[\"train_WER\"].append(train_wer_mean)\n\t\t\t\tlogs[\"train_EER\"].append(train_eer_mean)\n\n\t\t\t\tlogs[\"test_loss\"].append(test_loss_mean)\n\t\t\t\tlogs[\"test_WER\"].append(test_wer_mean)\n\t\t\t\tlogs[\"test_EER\"].append(test_eer_mean)\n\n\t\t\t\tprint(\"[INFO] Epoch {:03d}: train_loss: {:.3f} train_WER: {} train_EER: {} test_loss: {:.3f} test_WER: {} test_EER: {}\"\n\t\t\t\t\t.format(epoch, train_loss_mean, train_wer_mean, train_eer_mean, test_loss_mean, test_wer_mean, test_eer_mean))\n\n\t\t\t# Guardando log en csv\n\t\t\tdf = pd.DataFrame(logs, columns=columnas)\n\t\t\tdf.to_csv(self.logs_path+self.nombre+\"/logs.csv\", index=False)\n\n\t\t\te = range(0, epoch+1)\n\t\t\tplt.clf()\n\t\t\tplt.plot(e, logs[\"train_loss\"], label=\"train_loss\")\n\t\t\tplt.plot(e, logs[\"test_loss\"], label=\"test_loss\")\n\t\t\tplt.legend()\n\t\t\tplt.draw()\n\t\t\tplt.pause(0.001)\n\t\t\t#plt.show(block=False)\n\n\[email protected]\n\tdef decode(self, y_, sequence_length):\n\t\tb, f, _ = y_.get_shape()\n\t\tsequence_length = tf.fill([b, 1], f)\n\t\tsequence_length = tf.reshape(sequence_length, [-1])\n\t\ty_ = tf.transpose(y_, [1, 0, 2])\n\t\tdecoded, _ = tf.nn.ctc_greedy_decoder(y_, sequence_length)\n\n\t\treturn decoded[0]\n\n\tdef decode_cadenas(self, decoded):\n\t\tencodedLabelStrs = [[] for i in range(20)]\n\t\tidxDict = { b : [] for b in range(20) }\n\n\t\tfor (idx, idx2d) in enumerate(decoded.indices):\n\t\t\tlabel = decoded.values[idx]\n\t\t\tbatchElement = idx2d[0] # index according to [b,t]\n\t\t\tencodedLabelStrs[batchElement].append(label)\n\n\t\tcadenas = []\n\t\tfor strs in encodedLabelStrs:\n\t\t\tcadena = self.vocabulario.decodificar(strs)\n\t\t\tcadenas.append(cadena)\n\n\t\treturn cadenas\n\n\tdef decode_input(self, y):\n\t\tl, nl, nf = y\n\n\t\tcadenas_y = []\n\t\tfor i, n in enumerate(nl):\n\t\t\tcadena_y = self.vocabulario.decodificar(l[i, :n[0]])\n\t\t\tcadenas_y.append(cadena_y)\n\n\t\treturn cadenas_y\n\n\t# Encuentrar error rate\n\tdef EER(self, cadenas_Y):\n\t\teer = []\n\t\tfor c in cadenas_Y:\n\t\t\tencuentra = False\n\t\t\tfor p in c.split():\n\t\t\t\tif p == \"encuentra\" and not encuentra:\n\t\t\t\t\tencuentra = True\n\t\t\t\t\teer.append(0.0)\n\t\t\n\t\t\tif not encuentra:\n\t\t\t\teer.append(1.0)\n\n\t\treturn eer\n\n\tdef WER(self, cadenas, cadenas_y):\n\t\terr_wer = []\n\t\t#print(\"CADENAS\")\n\t\t#print(len(cadenas))\n\t\t#print(len(cadenas_y))\n\t\tfor i, c in enumerate(cadenas):\n\t\t\tif i < len(cadenas_y):\n\t\t\t\tcy = cadenas_y[i]\n\t\t\t\tnw = len(cy.split())\n\t\t\t\terr = wer(c.split(), 
cy.split())\n\n\t\t\t\terr_wer.append(err/nw)\n\t\t\telse:\n\t\t\t\terr_wer.append(1.0)\n\n\t\t\t\n\t\treturn err_wer\n\n\tdef printDecoded(self, cadenas, cadenas_y, tipo=\"\"):\n\t\t\"\"\"\n\t\tfor i, c in enumerate(cadenas):\n\t\t\tcy = cadenas_y[i]\n\t\t\tprint(\"CADENA ORIGINAL\")\n\t\t\tprint(cy)\n\t\t\tprint(\"CADENA MODELO\")\n\t\t\tprint(c)\n\t\t\"\"\"\n\n\t\tcolumnas = [\"original\", \"prediccion\"]\n\t\tdatos = {\n\t\t\t\"original\": cadenas_y,\n\t\t\t\"prediccion\": cadenas,\n\t\t}\n\n\t\tdf = pd.DataFrame(datos, columns=columnas)\n\t\tdf.to_csv(self.logs_path+self.nombre+\"/\"+tipo+\"prediccion.csv\", index=False)\n\n\n\t#@tf.function\n\tdef train_epoch(self, optimizer, train, epoch):\n\t\tepoch_loss = [] \n\t\tWER = []\n\t\tEER = []\n\t\n\t\ttf.summary.trace_off()\n\t\tfor x, y in train:\n\t\t\ty_, loss_value, grads = self.grad(x, y)\n\t\t\toptimizer.apply_gradients(zip(grads, self.model.trainable_variables))\n\n\t\t\tl, nl, nf = y\n\n\t\t\td = self.decode(y_, nf)\n\t\t\tcadenas = self.decode_cadenas(d)\n\t\t\tcadenas_y = self.decode_input(y)\n\n\t\t\tWER.append(self.WER(cadenas, cadenas_y))\n\t\t\tEER.append(self.EER(cadenas))\n\n\t\t\tif epoch % 10 == 0:\n\t\t\t\tself.printDecoded(cadenas, cadenas_y, tipo=\"train\")\n\n\t\t\tepoch_loss.append(loss_value)\n\n\t\ttf.summary.trace_on()\n\n\t\treturn epoch_loss, WER, EER\n\n\tdef test_epoch(self, test, epoch):\n\t\tepoch_loss = [] \n\t\tWER = []\n\t\tEER = []\n\t\n\t\tfor x, y in test:\n\t\t\ty_, loss_value = self.loss(x, y, training=False)\n\n\t\t\tl, nl, nf = y\n\n\t\t\td = self.decode(y_, nf)\n\t\t\tcadenas = self.decode_cadenas(d)\n\t\t\tcadenas_y = self.decode_input(y)\n\n\t\t\tWER.append(self.WER(cadenas, cadenas_y))\n\t\t\tEER.append(self.EER(cadenas))\n\n\t\t\tif epoch % 10 == 0:\n\t\t\t\tself.printDecoded(cadenas, cadenas_y, tipo=\"test\")\n\n\t\t\tepoch_loss.append(loss_value)\n\n\t\treturn epoch_loss, WER, EER\n\n\n\t\t\n\n\n"
] |
[
[
"tensorflow.nn.ctc_greedy_decoder",
"tensorflow.summary.trace_on",
"tensorflow.summary.trace_off",
"tensorflow.fill",
"tensorflow.transpose",
"matplotlib.pyplot.legend",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.plot",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.pause",
"tensorflow.GradientTape"
]
] |
GT-SALT/CODA
|
[
"7d3ae5d79288813c66f49701a1f019e8f61d18d6"
] |
[
"seq2seq/calculate_influence.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for sequence to sequence.\n\"\"\"\n# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport pickle\n\nfrom rouge import Rouge, FilesRouge\n\nimport nltk # Here to have a nice missing dependency error message early on\nimport numpy as np\nfrom datasets import load_dataset, load_metric, concatenate_datasets\n\nimport transformers\nfrom filelock import FileLock\nfrom transformers import (\n AutoConfig,\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n DataCollatorForSeq2Seq,\n HfArgumentParser,\n Seq2SeqTrainer,\n Seq2SeqTrainingArguments,\n set_seed,\n)\nfrom transformers.file_utils import is_offline_mode\nfrom transformers.trainer_utils import get_last_checkpoint, is_main_process\nfrom transformers.utils import check_min_version\n\n\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.5.0.dev0\")\n\nlogger = logging.getLogger(__name__)\n\ntry:\n nltk.data.find(\"tokenizers/punkt\")\nexcept (LookupError, OSError):\n if is_offline_mode():\n raise LookupError(\n \"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files\"\n )\n with FileLock(\".lock\") as lock:\n nltk.download(\"punkt\", quiet=True)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n reweight: bool = field(\n default=False,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n dropout: float = field(\n 
default=0.1,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n text_column: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The name of the column in the datasets containing the full texts (for summarization).\"},\n )\n summary_column: Optional[str] = field(\n default=None,\n metadata={\"help\": \"The name of the column in the datasets containing the summaries (for summarization).\"},\n )\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"The input training data file (a jsonlines or csv file).\"}\n )\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input evaluation data file to evaluate the metrics (rouge) on \"\n \"(a jsonlines or csv file).\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input test data file to evaluate the metrics (rouge) on \" \"(a jsonlines or csv file).\"\n },\n )\n unlabel_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input unlabel data file to evaluate the metrics (rouge) on \" \"(a jsonlines or csv file).\"\n },\n )\n \n \n noise_type: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"noise added to training data.\"\n },\n ) \n \n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_source_length: Optional[int] = field(\n default=1024,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n max_target_length: Optional[int] = field(\n default=128,\n metadata={\n \"help\": \"The maximum total sequence length for target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n val_max_target_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.\"\n \"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used \"\n \"during ``evaluate`` and ``predict``.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_val_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of validation examples to this \"\n \"value if set.\"\n },\n )\n max_test_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of test examples to this \"\n \"value if set.\"\n },\n )\n num_beams: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, \"\n \"which is used during ``evaluate`` and ``predict``.\"\n },\n )\n ignore_pad_token_for_loss: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to ignore the tokens corresponding to padded labels in the loss computation or not.\"\n },\n )\n source_prefix: Optional[str] = field(\n default=None, metadata={\"help\": \"A prefix to add before every source text (useful for T5 models).\"}\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n if self.val_max_target_length is None:\n self.val_max_target_length = self.max_target_length\n\n\nsummarization_name_mapping = {\n \"amazon_reviews_multi\": (\"review_body\", \"review_title\"),\n \"big_patent\": (\"description\", \"abstract\"),\n \"cnn_dailymail\": (\"article\", \"highlights\"),\n \"orange_sum\": (\"text\", \"summary\"),\n \"pn_summary\": (\"article\", \"summary\"),\n \"psc\": (\"extract_text\", \"summary_text\"),\n \"samsum\": (\"dialogue\", \"summary\"),\n \"thaisum\": (\"body\", \"summary\"),\n \"xglue\": (\"news_body\", \"news_title\"),\n \"xsum\": (\"document\", \"summary\"),\n \"wiki_summary\": (\"article\", \"highlights\"),\n}\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if data_args.source_prefix is None and model_args.model_name_or_path in [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n ]:\n logger.warning(\n \"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. 
with \"\n \"`--source_prefix 'summarize: ' `\"\n )\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(training_args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files this script will use the first column for the full texts and the second column for the\n # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n extension = data_args.train_file.split(\".\")[-1]\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.validation_file.split(\".\")[-1]\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.test_file.split(\".\")[-1]\n if data_args.unlabel_file is not None:\n data_files[\"ulbl\"] = data_args.unlabel_file\n extension = data_args.test_file.split(\".\")[-1]\n \n #data_files[\"train\"] = [data_args.train_file, ]\n #extension = data_args.train_file.split(\".\")[-1]\n \n datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods 
guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n \n config.reweight = model_args.reweight\n\n config.attention_dropout = model_args.dropout\n config.dropout = model_args.dropout\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = AutoModelForSeq2SeqLM.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n if model.config.decoder_start_token_id is None:\n raise ValueError(\"Make sure that `config.decoder_start_token_id` is correctly defined\")\n\n prefix = data_args.source_prefix if data_args.source_prefix is not None else \"\"\n\n # Preprocessing the datasets.\n # We need to tokenize inputs and targets.\n if training_args.do_train:\n column_names = datasets[\"train\"].column_names\n elif training_args.do_eval:\n column_names = datasets[\"validation\"].column_names\n elif training_args.do_predict:\n column_names = datasets[\"test\"].column_names\n elif training_args.do_semi_train:\n column_names = datasets[\"ulbl\"].column_names\n else:\n logger.info(\"There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.\")\n return\n\n # Get the column names for input/target.\n dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)\n if data_args.text_column is None:\n text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]\n else:\n text_column = data_args.text_column\n if text_column not in column_names:\n raise ValueError(\n f\"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}\"\n )\n if data_args.summary_column is None:\n summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]\n else:\n summary_column = data_args.summary_column\n if summary_column not in column_names:\n raise ValueError(\n f\"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}\"\n )\n\n # Temporarily set max_target_length for training.\n max_target_length = data_args.max_target_length\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n if training_args.label_smoothing_factor > 0 and not hasattr(model, \"prepare_decoder_input_ids_from_labels\"):\n logger.warn(\n \"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for\"\n f\"`{model.__class__.__name__}`. 
This will lead to loss being calculated twice and will take up more memory\"\n )\n\n def preprocess_function(examples):\n inputs = examples[text_column]\n targets = examples[summary_column]\n inputs = [prefix + inp for inp in inputs]\n model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)\n\n # Setup the tokenizer for targets\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)\n\n # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n # padding in the loss.\n if padding == \"max_length\" and data_args.ignore_pad_token_for_loss:\n labels[\"input_ids\"] = [\n [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]\n ]\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n if training_args.do_train and not training_args.do_semi_train:\n train_dataset = datasets[\"train\"]\n if \"train\" not in datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n ) \n elif training_args.do_train and training_args.do_semi_train:\n train_ulbl_dataset = datasets[\"ulbl\"]\n if \"ulbl\" not in datasets:\n raise ValueError(\"--do_semitrain requires a ulbl train dataset\")\n \n train_dataset = datasets[\"train\"]\n if \"train\" not in datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n if data_args.max_train_samples is not None:\n train_dataset = train_dataset.select(range(data_args.max_train_samples))\n \n train_dataset = concatenate_datasets([train_dataset, train_ulbl_dataset]) \n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_eval:\n max_target_length = data_args.val_max_target_length\n if \"validation\" not in datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = datasets[\"validation\"]\n if data_args.max_val_samples is not None:\n eval_dataset = eval_dataset.select(range(data_args.max_val_samples))\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n if training_args.do_predict:\n max_target_length = data_args.val_max_target_length\n if \"test\" not in datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n test_dataset = datasets[\"test\"]\n if data_args.max_test_samples is not None:\n test_dataset = test_dataset.select(range(data_args.max_test_samples))\n test_dataset = test_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n )\n\n # Data collator\n label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id\n \n \n \n\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n 
pad_to_multiple_of=8 if training_args.fp16 else None,\n noise_type = None,)\n \n \n\n # Metric\n metric = load_metric(\"rouge\")\n\n \n def postprocess_text(preds, labels):\n preds_temp = [pred.strip() for pred in preds]\n labels = [label.strip() for label in labels]\n \n preds = []\n for i in range(0,len(preds_temp)):\n if len(preds_temp) == 0:\n preds.append('[NULL]')\n else:\n preds.append(preds_temp[i])\n \n # rougeLSum expects newline after each sentence\n preds = [\"\\n\".join(nltk.sent_tokenize(pred)) for pred in preds]\n labels = [\"\\n\".join(nltk.sent_tokenize(label)) for label in labels]\n\n return preds, labels\n\n def compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n if data_args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n \n #rouge = Rouge()\n #result = rouge.get_scores(decoded_preds, decoded_labels, avg = True)\n result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)\n # Extract a few results from ROUGE\n result = {key: value.mid.fmeasure * 100 for key, value in result.items()}\n \n #result = {key: value['f'] * 100 for key, value in result.items()}\n\n prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n result[\"gen_len\"] = np.mean(prediction_lens)\n result = {k: round(v, 4) for k, v in result.items()}\n return result\n\n # Initialize our Trainer\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n eval_data_collator = data_collator,\n compute_metrics=compute_metrics if training_args.predict_with_generate else None,\n )\n\n \n\n # Evaluation\n results = {}\n\n \n logger.info(\"*** Evaluate ***\")\n\n influence_scores = trainer.cal_influence()\n \n output_test_preds_file = os.path.join(training_args.output_dir, \"influence_scores.pkl\")\n \n with open(output_test_preds_file, 'wb') as f:\n pickle.dump(influence_scores, f)\n \n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.mean",
"numpy.where",
"numpy.count_nonzero"
]
] |
caseygrun/plates
|
[
"156069487560d0c72f080f7e45a4dc2ae7a466ac"
] |
[
"microplates/io/tecan.py"
] |
[
"import pandas as pd\n\ndef read_single(path,header=None,nrows=8,measure='OD600',blank=None, **kwargs):\n # from IPython.core.debugger import set_trace; set_trace()\n\n # try to guess the header row if not provided\n if header is None:\n df = pd.read_excel(path, **kwargs)\n try:\n header = [row[1] for row in df.itertuples()].index(\"<>\")+1\n except ValueError as e:\n raise Exception(\"Unrecognized data format; could not find \"+\n \"an appropriate header in Excel file \"+path+\". \\n\"+repr(kwargs))\n\n # read raw data from excel file, excluding prefix at beginning of file\n # and suffix at end\n df = pd.read_excel(path,header=header,index_col=0,nrows=nrows, na_values=['OVER'], **kwargs)\n\n assert df.index.name == \"<>\", (\"Unrecognized data format; make sure `header` is set \"+\n \"to the row right above '<>' in Excel file \"+path+\". header = \"+str(header))\n\n # data will be an array mirroring the layout of the physical microtiter plate\n # `index` will be the rows, `columns` will be the physical column, and values\n # are OD600\n df.index.rename('row',inplace=True)\n df.columns.rename('column',inplace=True)\n df = df.reset_index()\n\n # convert dataframe from \"wide\" to \"long\" format (one well per row)\n df = df.melt(id_vars=['row'],var_name='column',value_name=measure).reset_index()\n\n # generate a column showing the well name (e.g. A1) from column and row\n df['well'] = df['row'] + df['column'].map(str)\n df = df.set_index('well')\n df = df.drop(columns=['row','column','index'])\n\n # subtract and drop the blank well if given\n if blank is not None:\n df[measure] = df[measure] - df.loc[blank, measure]\n df = df.drop(index=blank)\n return df\n\ndef read_multiple(path,header=None,nrows=96,measure='OD600',blank=None,na_values=['OVER'],npositions=None,**kwargs):\n\n if header is None:\n df = pd.read_excel(path,**kwargs)\n for row in df.itertuples():\n if row[1] == \"Well\":\n header = row[0]+1\n break\n assert header is not None, (\"Unrecognized data format; could not find \"+\n \"an appropriate header in Excel file \"+path)\n\n # read raw data from excel file, excluding prefix at beginning of file\n # and suffix at end\n if npositions is not None:\n kwargs['parse_cols'] = npositions+2\n df = pd.read_excel(path,header=header,index_col=0,nrows=nrows, **kwargs)\n\n assert df.index.name == \"Well\", (\"Unrecognized data format; make sure `header` is set \"+\n \"to the row right above '<>' in Excel file \"+path)\n\n # collect data into \"tidy\" format, with one row per observation, one column for each variable (well, time, OD600)\n df.columns = df.columns.rename('position')\n df = df.drop(columns=[\"Mean\",\"StDev\"])\n df.index = df.index.rename('well')\n df = df.reset_index()\n df = df.melt(id_vars=['well'],value_name=measure)\n\n # subtract and drop the blank well if given\n if blank is not None:\n df[measure] = df[measure] - df.loc[blank, measure].mean()\n df = df.drop(index=blank)\n return df\n\n\ndef read_timecourse(path,header=None,platemap=None,**kwargs):\n if header is None:\n df = pd.read_excel(path)\n for row in df.itertuples():\n if row[1] == \"Time [s]\":\n header = row[0]+1\n break\n assert header is not None, (\"Unrecognized data format; could not find \"+\n \"an appropriate header in Excel file \"+path)\n\n data = pd.read_excel(path,\n sheet_name=0, header=header, skip_footer=4, index_col=0)\n\n # Extract temperature vs. time as separate variable\n temps = data.loc['Temp. [°C]',:]\n # times = data.loc['Time']\n\n data = data.drop('Temp. 
[°C]')\n\n # collect data into \"tidy\" format, with one row per observation, one column for each variable (well, time, OD600)\n data.columns = data.columns.rename('time')\n data.index = data.index.rename('well')\n data = data.reset_index()\n data = data.melt(id_vars=['well'],value_name='OD600')\n\n # for each combination of (time, strain, NO3), subtract the OD600 of the concentration = 0, no inoculum well\n #data = plates.calc_norm(data,value='OD600',on='strain',columns=['time','strain','NO3'],\n # how=lambda x: x - x.loc['none'].mean())\n\n # convert time series data from seconds to hours\n data['time'] = data['time']/3600.0\n data = data.fillna(value={'sterile':0})\n\n return data\n"
] |
[
[
"pandas.read_excel"
]
] |
BlueHC/TTHack-2018--Traffic-Guide-1
|
[
"6a720e268d18c8e090c12b874336d17e89a183bb",
"6a720e268d18c8e090c12b874336d17e89a183bb"
] |
[
"predict.py",
"routing.py"
] |
[
"import numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom neural_net import HVVNet\n\nclass Predictor():\n def __init__(self, insize, outsize, model_path):\n self.model = HVVNet(insize, outsize)\n self.model.load_state_dict(torch.load(model_path))\n self.model.eval()\n \n def predict(self, scenarios, weights):\n # deprecated\n distribution = np.array([0.0]*4)\n for scenario, weight in zip(scenarios, weights):\n scenario = Variable(torch.from_numpy(scenario).float())\n output = self.model(scenario).data.numpy()\n output /= sum(output)\n distribution += output * weight\n \n return distribution\n \n def predict2(self, disruption):\n disruption = Variable(torch.from_numpy(disruption).float())\n output = self.model(disruption).data.numpy()\n return output # /= sum(output) (normalization?)\n \nif __name__ == \"__main__\":\n model_path = \"learning-rate=0.001_weight-decay=1e-05.model\"\n p = Predictor(4, 4, model_path)\n example = [np.array([3,4,2,1])]\n print(p.predict(example, np.array([1])))\n",
"import pandas as pd\n\ndef load_routes(stops=\"data/stops.txt\", stop_sequences=\"data/route_id_to_stop_sequence_new!!.csv\"):\n stop_df = pd.read_csv(filepath_or_buffer=stops, sep=\",\")\n sequence_df = pd.read_csv(filepath_or_buffer=stop_sequences, sep=\",\")\n df = pd.merge(sequence_df, stop_df[['stop_id', 'stop_name']], on=\"stop_id\", how=\"left\")\n routes = []\n current_route = df[\"route_id\"].iloc[0]\n current_stops = []\n for i, row in df.iterrows():\n new_route = row[\"route_id\"]\n if current_route != new_route:\n routes.append((current_route, current_stops))\n current_route = new_route\n current_stops = []\n current_stops.append(row[\"stop_name\"]) \n routes = pd.DataFrame(routes) \n routes.columns = [\"id\", \"stations\"]\n return routes\n\n"
] |
[
[
"numpy.array",
"torch.from_numpy",
"torch.load"
],
[
"pandas.merge",
"pandas.read_csv",
"pandas.DataFrame"
]
] |
KarthickTk/RTA-project
|
[
"f9cd18ecb2ae4952ad0233d48b57023924f46d7a"
] |
[
"app.py"
] |
[
"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport joblib\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom prediction import get_prediction, ordinal_encoder\n\nmodel = joblib.load(r'Notebook/Extratreeclassifier_model.joblib')\n\nst.set_page_config(page_title=\"Accident Severity Prediction App\",\n page_icon=\"🚧\", layout=\"wide\") \n\n\n#creating option list for dropdown menu\noptions_day = ['Sunday', \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\noptions_age = ['18-30', '31-50', 'Over 51', 'Unknown', 'Under 18']\n\noptions_acc_area = ['Other', 'Office areas', 'Residential areas', ' Church areas',\n ' Industrial areas', 'School areas', ' Recreational areas',\n ' Outside rural areas', ' Hospital areas', ' Market areas',\n 'Rural village areas', 'Unknown', 'Rural village areasOffice areas',\n 'Recreational areas']\n \noptions_cause = ['No distancing', 'Changing lane to the right',\n 'Changing lane to the left', 'Driving carelessly',\n 'No priority to vehicle', 'Moving Backward',\n 'No priority to pedestrian', 'Other', 'Overtaking',\n 'Driving under the influence of drugs', 'Driving to the left',\n 'Getting off the vehicle improperly', 'Driving at high speed',\n 'Overturning', 'Turnover', 'Overspeed', 'Overloading', 'Drunk driving',\n 'Unknown', 'Improper parking']\noptions_vehicle_type = ['Automobile', 'Lorry (41-100Q)', 'Other', 'Pick up upto 10Q',\n 'Public (12 seats)', 'Stationwagen', 'Lorry (11-40Q)',\n 'Public (13-45 seats)', 'Public (> 45 seats)', 'Long lorry', 'Taxi',\n 'Motorcycle', 'Special vehicle', 'Ridden horse', 'Turbo', 'Bajaj', 'Bicycle']\noptions_driver_exp = ['5-10yr', '2-5yr', 'Above 10yr', '1-2yr', 'Below 1yr', 'No Licence', 'unknown']\noptions_lanes = ['Two-way (divided with broken lines road marking)', 'Undivided Two way',\n 'other', 'Double carriageway (median)', 'One way',\n 'Two-way (divided with solid lines road marking)', 'Unknown']\n\nfeatures = ['hour','day_of_week','casualties','accident_cause','vehicles_involved','vehicle_type','driver_age','accident_area','driving_experience','lanes']\n\n\nst.markdown(\"<h1 style='text-align: center;'>Accident Severity Prediction App 🚧</h1>\", unsafe_allow_html=True)\ndef main():\n with st.form('prediction_form'):\n\n st.subheader(\"Enter the input for following features:\")\n \n hour = st.slider(\"Pickup Hour: \", 0, 23, value=0, format=\"%d\")\n day_of_week = st.selectbox(\"Select Day of the Week: \", options=options_day)\n casualties = st.slider(\"Hour of Accident: \", 1, 8, value=0, format=\"%d\")\n accident_cause = st.selectbox(\"Select Accident Cause: \", options=options_cause)\n vehicles_involved = st.slider(\"Pickup Hour: \", 1, 7, value=0, format=\"%d\")\n vehicle_type = st.selectbox(\"Select Vehicle Type: \", options=options_vehicle_type)\n driver_age = st.selectbox(\"Select Driver Age: \", options=options_age)\n accident_area = st.selectbox(\"Select Accident Area: \", options=options_acc_area)\n driving_experience = st.selectbox(\"Select Driving Experience: \", options=options_driver_exp)\n lanes = st.selectbox(\"Select Lanes: \", options=options_lanes)\n \n \n submit = st.form_submit_button(\"Predict\")\n\n\n if submit:\n day_of_week = ordinal_encoder(day_of_week, options_day)\n accident_cause = ordinal_encoder(accident_cause, options_cause)\n vehicle_type = ordinal_encoder(vehicle_type, options_vehicle_type)\n driver_age = ordinal_encoder(driver_age, options_age)\n accident_area = ordinal_encoder(accident_area, options_acc_area)\n driving_experience = 
ordinal_encoder(driving_experience, options_driver_exp) \n lanes = ordinal_encoder(lanes, options_lanes)\n\n\n data = np.array([hour,day_of_week,casualties,accident_cause,vehicles_involved, \n vehicle_type,driver_age,accident_area,driving_experience,lanes]).reshape(1,-1)\n\n pred = get_prediction(data =data, model=model)\n\n st.write(f\"The predicted severity is: {pred[0]}\")\n\nif __name__ == '__main__':\n main()"
] |
[
[
"numpy.array"
]
] |
Daiver/torch_fuze
|
[
"6b7ad568e2d7549c7f0c0d4c309532ac1b92881d"
] |
[
"examples/cifar10/train_dedicated_device.py"
] |
[
"from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\n\nimport torch_fuze\nimport pretrainedmodels\nimport torchvision\n\n\n# Just copy-paste from https://github.com/catalyst-team/catalyst/blob/master/examples/notebook-example.ipynb\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nclass Net2(nn.Module):\n\n def __init__(self):\n super().__init__()\n # self.backbone = pretrainedmodels.resnet50()\n resnet = torchvision.models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-2]\n resnet = nn.Sequential(*modules)\n self.backbone = resnet\n self.add_module('backbone', self.backbone)\n self.fc_final = nn.Linear(2048, 10)\n\n def forward(self, x):\n # x = self.backbone.features(x)\n x = self.backbone(x)\n # print(x.shape)\n x = F.adaptive_avg_pool2d(x, (1, 1)).view(-1, 2048)\n x = self.fc_final(x)\n return x\n\n\ndef main():\n print(f\"Torch version: {torch.__version__}, CUDA: {torch.version.cuda}, Fuze version: {torch_fuze.__version__}\")\n\n # lr = 0.01\n # batch_size = 32\n batch_size = 64\n # batch_size = 128\n # device = \"cpu\"\n device = \"cuda:0\"\n\n print(f\"Device: {device}\")\n if device.startswith(\"cuda\"):\n print(f\"GPU name: {torch.cuda.get_device_name(int(device.split(':')[-1]))}\")\n\n trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])\n # if not exist, download mnist dataset\n train_set = CIFAR10(root=\"data/\", train=True, transform=trans, download=True)\n test_set = CIFAR10(root=\"data/\", train=False, transform=trans, download=True)\n\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, pin_memory=False, num_workers=4)\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, pin_memory=False, num_workers=4)\n\n # model = Net()\n model = Net2()\n model = nn.DataParallel(model)\n model.to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters())\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5, 8], gamma=0.3)\n\n metrics = OrderedDict([\n (\"loss\", criterion),\n (\"acc\", torch_fuze.metrics.Accuracy())\n ])\n callbacks = [\n torch_fuze.callbacks.ProgressCallback(),\n # torch_fuze.callbacks.BestModelSaverCallback(\n # model, \"checkpoints/best.pt\", metric_name=\"acc\", lower_is_better=False),\n # torch_fuze.callbacks.TensorBoardXCallback(\"logs\", remove_old_logs=True),\n # torch_fuze.callbacks.MLFlowCallback(\n # metrics_to_track={\"valid_loss\", \"valid_acc\", \"train_acc\"},\n # lowest_metrics_to_track={\"valid_loss\"},\n # highest_metrics_to_track={\"valid_acc\"},\n # files_to_save_at_every_batch={\"checkpoints/best.pt\"})\n ]\n trainer = torch_fuze.SupervisedTrainer(model, criterion, device)\n trainer.run(\n train_loader, test_loader, optimizer, scheduler=scheduler, n_epochs=200, callbacks=callbacks, metrics=metrics)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.Sequential",
"torch.nn.CrossEntropyLoss",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.DataParallel"
]
] |
salama4/pandas
|
[
"1d36851ffdc8391b8b60a7628ce5a536180ef13b"
] |
[
"pandas/core/generic.py"
] |
[
"import collections\nfrom datetime import timedelta\nimport functools\nimport gc\nimport json\nimport operator\nimport pickle\nimport re\nfrom textwrap import dedent\nfrom typing import (\n Any,\n Callable,\n Dict,\n FrozenSet,\n Hashable,\n List,\n Mapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\nimport warnings\nimport weakref\n\nimport numpy as np\n\nfrom pandas._config import config\n\nfrom pandas._libs import Timestamp, iNaT, lib, properties\nfrom pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries, JSONSerializable\nfrom pandas.compat import set_function_name\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender, Substitution, rewrite_axis_style_signature\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_fillna_kwargs,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_object,\n ensure_str,\n is_bool,\n is_bool_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_float,\n is_integer,\n is_list_like,\n is_number,\n is_numeric_dtype,\n is_object_dtype,\n is_period_arraylike,\n is_re_compilable,\n is_scalar,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCSeries\nfrom pandas.core.dtypes.inference import is_hashable\nfrom pandas.core.dtypes.missing import isna, notna\n\nimport pandas as pd\nfrom pandas.core import missing, nanops\nimport pandas.core.algorithms as algos\nfrom pandas.core.base import PandasObject, SelectionMixin\nimport pandas.core.common as com\nfrom pandas.core.construction import create_series_with_explicit_dtype\nfrom pandas.core.indexes.api import (\n Index,\n InvalidIndexError,\n MultiIndex,\n RangeIndex,\n ensure_index,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.period import Period, PeriodIndex\nimport pandas.core.indexing as indexing\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.missing import find_valid_index\nfrom pandas.core.ops import _align_method_FRAME\n\nfrom pandas.io.formats import format as fmt\nfrom pandas.io.formats.format import DataFrameFormatter, format_percentiles\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.tseries.frequencies import to_offset\n\n# goal is to be able to define the docs close to function, while still being\n# able to share\n_shared_docs: Dict[str, str] = dict()\n_shared_doc_kwargs = dict(\n axes=\"keywords for axes\",\n klass=\"Series/DataFrame\",\n axes_single_arg=\"int or labels for object\",\n args_transpose=\"axes to permute (int or label for object)\",\n optional_by=\"\"\"\n by : str or list of str\n Name or list of names to sort by\"\"\",\n)\n\n# sentinel value to use as kwarg in place of None when None has special meaning\n# and needs to be distinguished from a user explicitly passing None.\nsentinel = object()\n\n\ndef _single_replace(self, to_replace, method, inplace, limit):\n \"\"\"\n Replaces values in a Series using the fill method specified when no\n replacement value is given in the replace method\n \"\"\"\n if self.ndim != 1:\n raise TypeError(\n f\"cannot replace {to_replace} with method {method} on a \"\n f\"{type(self).__name__}\"\n )\n\n orig_dtype = self.dtype\n result = self if inplace else self.copy()\n fill_f = missing.get_fill_func(method)\n\n mask = missing.mask_missing(result.values, 
to_replace)\n values = fill_f(result.values, limit=limit, mask=mask)\n\n if values.dtype == orig_dtype and inplace:\n return\n\n result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)\n\n if inplace:\n self._update_inplace(result._data)\n return\n\n return result\n\n\nbool_t = bool # Need alias because NDFrame has def bool:\n\n\nclass NDFrame(PandasObject, SelectionMixin):\n \"\"\"\n N-dimensional analogue of DataFrame. Store multi-dimensional in a\n size-mutable, labeled data structure\n\n Parameters\n ----------\n data : BlockManager\n axes : list\n copy : bool, default False\n \"\"\"\n\n _internal_names: List[str] = [\n \"_data\",\n \"_cacher\",\n \"_item_cache\",\n \"_cache\",\n \"_is_copy\",\n \"_subtyp\",\n \"_name\",\n \"_index\",\n \"_default_kind\",\n \"_default_fill_value\",\n \"_metadata\",\n \"__array_struct__\",\n \"__array_interface__\",\n ]\n _internal_names_set: Set[str] = set(_internal_names)\n _accessors: Set[str] = set()\n _deprecations: FrozenSet[str] = frozenset([\"get_values\", \"ix\"])\n _metadata: List[str] = []\n _is_copy = None\n _data: BlockManager\n _attrs: Dict[Optional[Hashable], Any]\n _typ: str\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data: BlockManager,\n axes: Optional[List[Index]] = None,\n copy: bool = False,\n dtype: Optional[Dtype] = None,\n attrs: Optional[Mapping[Optional[Hashable], Any]] = None,\n fastpath: bool = False,\n ):\n\n if not fastpath:\n if dtype is not None:\n data = data.astype(dtype)\n elif copy:\n data = data.copy()\n\n if axes is not None:\n for i, ax in enumerate(axes):\n data = data.reindex_axis(ax, axis=i)\n\n object.__setattr__(self, \"_is_copy\", None)\n object.__setattr__(self, \"_data\", data)\n object.__setattr__(self, \"_item_cache\", {})\n if attrs is None:\n attrs = {}\n else:\n attrs = dict(attrs)\n object.__setattr__(self, \"_attrs\", attrs)\n\n def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):\n \"\"\" passed a manager and a axes dict \"\"\"\n for a, axe in axes.items():\n if axe is not None:\n mgr = mgr.reindex_axis(\n axe, axis=self._get_block_manager_axis(a), copy=False\n )\n\n # make a copy if explicitly requested\n if copy:\n mgr = mgr.copy()\n if dtype is not None:\n # avoid further copies if we can\n if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:\n mgr = mgr.astype(dtype=dtype)\n return mgr\n\n # ----------------------------------------------------------------------\n\n @property\n def attrs(self) -> Dict[Optional[Hashable], Any]:\n \"\"\"\n Dictionary of global attributes on this object.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None:\n self._attrs = dict(value)\n\n def _validate_dtype(self, dtype):\n \"\"\" validate the passed dtype \"\"\"\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n # a compound dtype\n if dtype.kind == \"V\":\n raise NotImplementedError(\n \"compound dtypes are not implemented\"\n f\" in the {type(self).__name__} constructor\"\n )\n\n return dtype\n\n # ----------------------------------------------------------------------\n # Construction\n\n @property\n def _constructor(self):\n \"\"\"Used when a manipulation result has the same dimensions as the\n original.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_sliced(self):\n \"\"\"Used when a manipulation result has one lower dimension(s) as the\n original, 
such as DataFrame single columns slicing.\n \"\"\"\n raise AbstractMethodError(self)\n\n @property\n def _constructor_expanddim(self):\n \"\"\"Used when a manipulation result has one higher dimension as the\n original, such as Series.to_frame()\n \"\"\"\n raise NotImplementedError\n\n # ----------------------------------------------------------------------\n # Axis\n _AXIS_ALIASES = {\"rows\": 0}\n _AXIS_IALIASES = {0: \"rows\"}\n _stat_axis_number = 0\n _stat_axis_name = \"index\"\n _ix = None\n _AXIS_ORDERS: List[str]\n _AXIS_NUMBERS: Dict[str, int]\n _AXIS_NAMES: Dict[int, str]\n _AXIS_REVERSED: bool\n _info_axis_number: int\n _info_axis_name: str\n _AXIS_LEN: int\n\n @classmethod\n def _setup_axes(cls, axes: List[str], docs: Dict[str, str]):\n \"\"\"\n Provide axes setup for the major PandasObjects.\n\n Parameters\n ----------\n axes : the names of the axes in order (lowest to highest)\n docs : docstrings for the axis properties\n \"\"\"\n info_axis = len(axes) - 1\n axes_are_reversed = len(axes) > 1\n\n cls._AXIS_ORDERS = axes\n cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}\n cls._AXIS_LEN = len(axes)\n cls._AXIS_NAMES = dict(enumerate(axes))\n cls._AXIS_REVERSED = axes_are_reversed\n\n cls._info_axis_number = info_axis\n cls._info_axis_name = axes[info_axis]\n\n # setup the actual axis\n def set_axis(a, i):\n setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))\n cls._internal_names_set.add(a)\n\n if axes_are_reversed:\n for i, a in cls._AXIS_NAMES.items():\n set_axis(a, 1 - i)\n else:\n for i, a in cls._AXIS_NAMES.items():\n set_axis(a, i)\n\n def _construct_axes_dict(self, axes=None, **kwargs):\n \"\"\"Return an axes dictionary for myself.\"\"\"\n d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}\n d.update(kwargs)\n return d\n\n @staticmethod\n def _construct_axes_dict_from(self, axes, **kwargs):\n \"\"\"Return an axes dictionary for the passed axes.\"\"\"\n d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}\n d.update(kwargs)\n return d\n\n def _construct_axes_from_arguments(\n self, args, kwargs, require_all: bool = False, sentinel=None\n ):\n \"\"\"Construct and returns axes if supplied in args/kwargs.\n\n If require_all, raise if all axis arguments are not supplied\n return a tuple of (axes, kwargs).\n\n sentinel specifies the default parameter when an axis is not\n supplied; useful to distinguish when a user explicitly passes None\n in scenarios where None has special meaning.\n \"\"\"\n\n # construct the args\n args = list(args)\n for a in self._AXIS_ORDERS:\n\n # look for a argument by position\n if a not in kwargs:\n try:\n kwargs[a] = args.pop(0)\n except IndexError:\n if require_all:\n raise TypeError(\"not enough/duplicate arguments specified!\")\n\n axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}\n return axes, kwargs\n\n @classmethod\n def _from_axes(cls, data, axes, **kwargs):\n # for construction from BlockManager\n if isinstance(data, BlockManager):\n return cls(data, **kwargs)\n else:\n if cls._AXIS_REVERSED:\n axes = axes[::-1]\n d = cls._construct_axes_dict_from(cls, axes, copy=False)\n d.update(kwargs)\n return cls(data, **d)\n\n @classmethod\n def _get_axis_number(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, axis)\n if is_integer(axis):\n if axis in cls._AXIS_NAMES:\n return axis\n else:\n try:\n return cls._AXIS_NUMBERS[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n @classmethod\n def _get_axis_name(cls, axis):\n axis = cls._AXIS_ALIASES.get(axis, 
axis)\n if isinstance(axis, str):\n if axis in cls._AXIS_NUMBERS:\n return axis\n else:\n try:\n return cls._AXIS_NAMES[axis]\n except KeyError:\n pass\n raise ValueError(f\"No axis named {axis} for object type {cls}\")\n\n def _get_axis(self, axis):\n name = self._get_axis_name(axis)\n return getattr(self, name)\n\n @classmethod\n def _get_block_manager_axis(cls, axis):\n \"\"\"Map the axis to the block_manager axis.\"\"\"\n axis = cls._get_axis_number(axis)\n if cls._AXIS_REVERSED:\n m = cls._AXIS_LEN - 1\n return m - axis\n return axis\n\n def _get_axis_resolvers(self, axis):\n # index or columns\n axis_index = getattr(self, axis)\n d = dict()\n prefix = axis[0]\n\n for i, name in enumerate(axis_index.names):\n if name is not None:\n key = level = name\n else:\n # prefix with 'i' or 'c' depending on the input axis\n # e.g., you must do ilevel_0 for the 0th level of an unnamed\n # multiiindex\n key = f\"{prefix}level_{i}\"\n level = i\n\n level_values = axis_index.get_level_values(level)\n s = level_values.to_series()\n s.index = axis_index\n d[key] = s\n\n # put the index/columns itself in the dict\n if isinstance(axis_index, MultiIndex):\n dindex = axis_index\n else:\n dindex = axis_index.to_series()\n\n d[axis] = dindex\n return d\n\n def _get_index_resolvers(self):\n d = {}\n for axis_name in self._AXIS_ORDERS:\n d.update(self._get_axis_resolvers(axis_name))\n return d\n\n def _get_space_character_free_column_resolvers(self):\n \"\"\"Return the space character free column resolvers of a dataframe.\n\n Column names with spaces are 'cleaned up' so that they can be referred\n to by backtick quoting.\n Used in :meth:`DataFrame.eval`.\n \"\"\"\n from pandas.core.computation.common import _remove_spaces_column_name\n\n return {_remove_spaces_column_name(k): v for k, v in self.items()}\n\n @property\n def _info_axis(self):\n return getattr(self, self._info_axis_name)\n\n @property\n def _stat_axis(self):\n return getattr(self, self._stat_axis_name)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Return a tuple of axis dimensions\n \"\"\"\n return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)\n\n @property\n def axes(self):\n \"\"\"\n Return index label(s) of the internal NDFrame\n \"\"\"\n # we do it this way because if we have reversed axes, then\n # the block manager shows then reversed\n return [self._get_axis(a) for a in self._AXIS_ORDERS]\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of axes / array dimensions.\n\n Return 1 if Series. Otherwise return 2 if DataFrame.\n\n See Also\n --------\n ndarray.ndim : Number of array dimensions.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.ndim\n 1\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.ndim\n 2\n \"\"\"\n return self._data.ndim\n\n @property\n def size(self):\n \"\"\"\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. 
Otherwise return the number of\n rows times number of columns if DataFrame.\n\n See Also\n --------\n ndarray.size : Number of elements in the array.\n\n Examples\n --------\n >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})\n >>> s.size\n 3\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.size\n 4\n \"\"\"\n return np.prod(self.shape)\n\n @property\n def _selected_obj(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n @property\n def _obj_with_exclusions(self: FrameOrSeries) -> FrameOrSeries:\n \"\"\" internal compat with SelectionMixin \"\"\"\n return self\n\n def set_axis(self, labels, axis=0, inplace=False):\n \"\"\"\n Assign desired index to given axis.\n\n Indexes for column or row labels can be changed by assigning\n a list-like or Index.\n\n .. versionchanged:: 0.21.0\n\n The signature is now `labels` and `axis`, consistent with\n the rest of pandas API. Previously, the `axis` and `labels`\n arguments were respectively the first and second positional\n arguments.\n\n Parameters\n ----------\n labels : list-like, Index\n The values for the new index.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to update. The value 0 identifies the rows, and 1\n identifies the columns.\n\n inplace : bool, default False\n Whether to return a new %(klass)s instance.\n\n Returns\n -------\n renamed : %(klass)s or None\n An object of same type as caller if inplace=False, None otherwise.\n\n See Also\n --------\n DataFrame.rename_axis : Alter the name of the index or columns.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0)\n a 1\n b 2\n c 3\n dtype: int64\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index')\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns')\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n if inplace:\n setattr(self, self._get_axis_name(axis), labels)\n else:\n obj = self.copy()\n obj.set_axis(labels, axis=axis, inplace=True)\n return obj\n\n def _set_axis(self, axis, labels):\n self._data.set_axis(axis, labels)\n self._clear_item_cache()\n\n def swapaxes(self, axis1, axis2, copy=True):\n \"\"\"\n Interchange axes and swap values axes appropriately.\n\n Returns\n -------\n y : same as input\n \"\"\"\n i = self._get_axis_number(axis1)\n j = self._get_axis_number(axis2)\n\n if i == j:\n if copy:\n return self.copy()\n return self\n\n mapping = {i: j, j: i}\n\n new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))\n new_values = self.values.swapaxes(i, j)\n if copy:\n new_values = new_values.copy()\n\n return self._constructor(new_values, *new_axes).__finalize__(self)\n\n def droplevel(self, level, axis=0):\n \"\"\"\n Return DataFrame with requested index / column level(s) removed.\n\n .. 
versionadded:: 0.24.0\n\n Parameters\n ----------\n level : int, str, or list-like\n If a string is given, must be the name of a level\n If list-like, elements must be names or positional indexes\n of levels.\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n Returns\n -------\n DataFrame\n DataFrame with requested index / column level(s) removed.\n\n Examples\n --------\n >>> df = pd.DataFrame([\n ... [1, 2, 3, 4],\n ... [5, 6, 7, 8],\n ... [9, 10, 11, 12]\n ... ]).set_index([0, 1]).rename_axis(['a', 'b'])\n\n >>> df.columns = pd.MultiIndex.from_tuples([\n ... ('c', 'e'), ('d', 'f')\n ... ], names=['level_1', 'level_2'])\n\n >>> df\n level_1 c d\n level_2 e f\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n\n >>> df.droplevel('a')\n level_1 c d\n level_2 e f\n b\n 2 3 4\n 6 7 8\n 10 11 12\n\n >>> df.droplevel('level2', axis=1)\n level_1 c d\n a b\n 1 2 3 4\n 5 6 7 8\n 9 10 11 12\n \"\"\"\n labels = self._get_axis(axis)\n new_labels = labels.droplevel(level)\n result = self.set_axis(new_labels, axis=axis, inplace=False)\n return result\n\n def pop(self, item):\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : str\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n result = self[item]\n del self[item]\n try:\n result._reset_cacher()\n except AttributeError:\n pass\n\n return result\n\n def squeeze(self, axis=None):\n \"\"\"\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. 
By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n \"\"\"\n axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)\n return self.iloc[\n tuple(\n 0 if i in axis and len(a) == 1 else slice(None)\n for i, a in enumerate(self.axes)\n )\n ]\n\n def swaplevel(self, i=-2, j=-1, axis=0):\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis\n\n Parameters\n ----------\n i, j : int, str (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : same type as caller (new object)\n \"\"\"\n axis = self._get_axis_number(axis)\n result = self.copy()\n labels = result._data.axes[axis]\n result._data.set_axis(axis, labels.swaplevel(i, j))\n return result\n\n # ----------------------------------------------------------------------\n # Rename\n\n def rename(self, *args, **kwargs):\n \"\"\"\n Alter axes input function or functions. Function / dict values must be\n unique (1-to-1). Labels not contained in a dict / Series will be left\n as-is. Extra labels listed don't throw an error. Alternatively, change\n ``Series.name`` with a scalar value (Series only).\n\n Parameters\n ----------\n %(axes)s : scalar, list-like, dict-like or function, optional\n Scalar or list-like will alter the ``Series.name`` attribute,\n and raise on DataFrame.\n dict-like or functions are transformations to apply to\n that axis' values\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new %(klass)s. 
If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n renamed : %(klass)s (new object)\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n NDFrame.rename_axis\n\n Examples\n --------\n\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n >>> s.rename(\"my_name\") # scalar, changes Series.name\n 0 1\n 1 2\n 2 3\n Name: my_name, dtype: int64\n >>> s.rename(lambda x: x ** 2) # function, changes labels\n 0 1\n 1 2\n 4 3\n dtype: int64\n >>> s.rename({1: 3, 2: 5}) # mapping, changes labels\n 0 1\n 3 2\n 5 3\n dtype: int64\n\n Since ``DataFrame`` doesn't have a ``.name`` attribute,\n only mapping-type arguments are allowed.\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(2)\n Traceback (most recent call last):\n ...\n TypeError: 'int' object is not callable\n\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename(index=str, columns={\"A\": \"a\", \"C\": \"c\"})\n a B\n 0 1 4\n 1 2 5\n 2 3 6\n\n Using axis-style parameters\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n\n See the :ref:`user guide <basics.rename>` for more.\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n copy = kwargs.pop(\"copy\", True)\n inplace = kwargs.pop(\"inplace\", False)\n level = kwargs.pop(\"level\", None)\n axis = kwargs.pop(\"axis\", None)\n errors = kwargs.pop(\"errors\", \"ignore\")\n if axis is not None:\n # Validate the axis\n self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError(\n \"rename() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n if com.count_not_none(*axes.values()) == 0:\n raise TypeError(\"must pass an index to rename\")\n\n self._consolidate_inplace()\n result = self if inplace else self.copy(deep=copy)\n\n # start in the axis order to eliminate too many copies\n for axis in range(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is None:\n continue\n f = com.get_rename_function(v)\n baxis = self._get_block_manager_axis(axis)\n if level is not None:\n level = self.axes[axis]._get_level_number(level)\n\n # GH 13473\n if not callable(v):\n indexer = self.axes[axis].get_indexer_for(v)\n if errors == \"raise\" and len(indexer[indexer == -1]):\n missing_labels = [\n label for index, label in enumerate(v) if indexer[index] == -1\n ]\n raise KeyError(f\"{missing_labels} not found in axis\")\n\n result._data = result._data.rename_axis(\n f, axis=baxis, copy=copy, level=level\n )\n result._clear_item_cache()\n\n if inplace:\n self._update_inplace(result._data)\n else:\n return result.__finalize__(self)\n\n @rewrite_axis_style_signature(\"mapper\", [(\"copy\", True), (\"inplace\", False)])\n 
def rename_axis(self, mapper=sentinel, **kwargs):\n \"\"\"\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n Value to set the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n\n .. versionchanged:: 0.24.0\n\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to rename.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new Series\n or DataFrame.\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n In this case, the parameter ``copy`` is ignored.\n\n The second calling convention will modify the names of the\n the corresponding index if mapper is a list or a scalar.\n However, if mapper is dict-like or a function, it will use the\n deprecated behavior of modifying the axis *labels*.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([\"dog\", \"cat\", \"monkey\"])\n >>> s\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n >>> s.rename_axis(\"animal\")\n animal\n 0 dog\n 1 cat\n 2 monkey\n dtype: object\n\n **DataFrame**\n\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2],\n ... \"num_arms\": [0, 0, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"animal\")\n >>> df\n num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n >>> df = df.rename_axis(\"limbs\", axis=\"columns\")\n >>> df\n limbs num_legs num_arms\n animal\n dog 4 0\n cat 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> df.index = pd.MultiIndex.from_product([['mammal'],\n ... ['dog', 'cat', 'monkey']],\n ... 
names=['type', 'name'])\n >>> df\n limbs num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={'type': 'class'})\n limbs num_legs num_arms\n class name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(columns=str.upper)\n LIMBS num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n \"\"\"\n axes, kwargs = self._construct_axes_from_arguments(\n (), kwargs, sentinel=sentinel\n )\n copy = kwargs.pop(\"copy\", True)\n inplace = kwargs.pop(\"inplace\", False)\n axis = kwargs.pop(\"axis\", 0)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n if kwargs:\n raise TypeError(\n \"rename_axis() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if mapper is not sentinel:\n # Use v0.23 behavior if a scalar or list\n non_mapper = is_scalar(mapper) or (\n is_list_like(mapper) and not is_dict_like(mapper)\n )\n if non_mapper:\n return self._set_axis_name(mapper, axis=axis, inplace=inplace)\n else:\n raise ValueError(\"Use `.rename` to alter labels with a mapper.\")\n else:\n # Use new behavior. Means that index and/or columns\n # is specified\n result = self if inplace else self.copy(deep=copy)\n\n for axis in range(self._AXIS_LEN):\n v = axes.get(self._AXIS_NAMES[axis])\n if v is sentinel:\n continue\n non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))\n if non_mapper:\n newnames = v\n else:\n f = com.get_rename_function(v)\n curnames = self._get_axis(axis).names\n newnames = [f(name) for name in curnames]\n result._set_axis_name(newnames, axis=axis, inplace=True)\n if not inplace:\n return result\n\n def _set_axis_name(self, name, axis=0, inplace=False):\n \"\"\"\n Set the name(s) of the axis.\n\n Parameters\n ----------\n name : str or list of str\n Name(s) to set.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to set the label. The value 0 or 'index' specifies index,\n and the value 1 or 'columns' specifies columns.\n inplace : bool, default False\n If `True`, do operation inplace and return None.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n Series, DataFrame, or None\n The same type as the caller or `None` if `inplace` is `True`.\n\n See Also\n --------\n DataFrame.rename : Alter the axis labels of :class:`DataFrame`.\n Series.rename : Alter the index labels or set the index name\n of :class:`Series`.\n Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"num_legs\": [4, 4, 2]},\n ... [\"dog\", \"cat\", \"monkey\"])\n >>> df\n num_legs\n dog 4\n cat 4\n monkey 2\n >>> df._set_axis_name(\"animal\")\n num_legs\n animal\n dog 4\n cat 4\n monkey 2\n >>> df.index = pd.MultiIndex.from_product(\n ... 
[[\"mammal\"], ['dog', 'cat', 'monkey']])\n >>> df._set_axis_name([\"type\", \"name\"])\n legs\n type name\n mammal dog 4\n cat 4\n monkey 2\n \"\"\"\n axis = self._get_axis_number(axis)\n idx = self._get_axis(axis).set_names(name)\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n renamed = self if inplace else self.copy()\n renamed.set_axis(idx, axis=axis, inplace=True)\n if not inplace:\n return renamed\n\n # ----------------------------------------------------------------------\n # Comparison Methods\n\n def _indexed_same(self, other) -> bool:\n return all(\n self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS\n )\n\n def equals(self, other):\n \"\"\"\n Test whether two objects contain the same elements.\n\n This function allows two Series or DataFrames to be compared against\n each other to see if they have the same shape and elements. NaNs in\n the same location are considered equal. The column headers do not\n need to have the same type, but the elements within the columns must\n be the same dtype.\n\n Parameters\n ----------\n other : Series or DataFrame\n The other Series or DataFrame to be compared with the first.\n\n Returns\n -------\n bool\n True if all elements are the same in both objects, False\n otherwise.\n\n See Also\n --------\n Series.eq : Compare two Series objects of the same length\n and return a Series where each element is True if the element\n in each Series is equal, False otherwise.\n DataFrame.eq : Compare two DataFrame objects of the same shape and\n return a DataFrame where each element is True if the respective\n element in each DataFrame is equal, False otherwise.\n testing.assert_series_equal : Raises an AssertionError if left and\n right are not equal. Provides an easy interface to ignore\n inequality in dtypes, indexes and precision among others.\n testing.assert_frame_equal : Like assert_series_equal, but targets\n DataFrames.\n numpy.array_equal : Return True if two arrays have the same shape\n and elements, False otherwise.\n\n Notes\n -----\n This function requires that the elements have the same dtype as their\n respective elements in the other Series or DataFrame. 
However, the\n column labels do not need to have the same type, as long as they are\n still considered equal.\n\n Examples\n --------\n >>> df = pd.DataFrame({1: [10], 2: [20]})\n >>> df\n 1 2\n 0 10 20\n\n DataFrames df and exactly_equal have the same types and values for\n their elements and column labels, which will return True.\n\n >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})\n >>> exactly_equal\n 1 2\n 0 10 20\n >>> df.equals(exactly_equal)\n True\n\n DataFrames df and different_column_type have the same element\n types and values, but have different types for the column labels,\n which will still return True.\n\n >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})\n >>> different_column_type\n 1.0 2.0\n 0 10 20\n >>> df.equals(different_column_type)\n True\n\n DataFrames df and different_data_type have different types for the\n same values for their elements, and will return False even though\n their column labels are the same values and types.\n\n >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})\n >>> different_data_type\n 1 2\n 0 10.0 20.0\n >>> df.equals(different_data_type)\n False\n \"\"\"\n if not isinstance(other, self._constructor):\n return False\n return self._data.equals(other._data)\n\n # -------------------------------------------------------------------------\n # Unary Methods\n\n def __neg__(self):\n values = com.values_from_object(self)\n if is_bool_dtype(values):\n arr = operator.inv(values)\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.neg(values)\n else:\n raise TypeError(f\"Unary negative expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __pos__(self):\n values = com.values_from_object(self)\n if is_bool_dtype(values) or is_period_arraylike(values):\n arr = values\n elif (\n is_numeric_dtype(values)\n or is_timedelta64_dtype(values)\n or is_object_dtype(values)\n ):\n arr = operator.pos(values)\n else:\n raise TypeError(f\"Unary plus expects numeric dtype, not {values.dtype}\")\n return self.__array_wrap__(arr)\n\n def __invert__(self):\n if not self.size:\n # inv fails with 0 len\n return self\n\n arr = operator.inv(com.values_from_object(self))\n return self.__array_wrap__(arr)\n\n def __nonzero__(self):\n raise ValueError(\n f\"The truth value of a {type(self).__name__} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n )\n\n __bool__ = __nonzero__\n\n def bool(self):\n \"\"\"\n Return the bool of a single element PandasObject.\n\n This must be a boolean scalar value, either True or False. Raise a\n ValueError if the PandasObject does not have exactly 1 element, or that\n element is not boolean\n\n Returns\n -------\n bool\n Same single boolean value converted to bool type.\n \"\"\"\n v = self.squeeze()\n if isinstance(v, (bool, np.bool_)):\n return bool(v)\n elif is_scalar(v):\n raise ValueError(\n \"bool cannot act on a non-boolean single element \"\n f\"{type(self).__name__}\"\n )\n\n self.__nonzero__()\n\n def __abs__(self):\n return self.abs()\n\n def __round__(self, decimals=0):\n return self.round(decimals)\n\n # -------------------------------------------------------------------------\n # Label or Level Combination Helpers\n #\n # A collection of helper methods for DataFrame/Series operations that\n # accept a combination of column/index labels and levels. 
All such\n # operations should utilize/extend these methods when possible so that we\n # have consistent precedence and validation logic throughout the library.\n\n def _is_level_reference(self, key, axis=0):\n \"\"\"\n Test whether a key is a level reference for a given axis.\n\n To be considered a level reference, `key` must be a string that:\n - (axis=0): Matches the name of an index level and does NOT match\n a column label.\n - (axis=1): Matches the name of a column level and does NOT match\n an index label.\n\n Parameters\n ----------\n key : str\n Potential level name for the given axis\n axis : int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_level : bool\n \"\"\"\n axis = self._get_axis_number(axis)\n\n return (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and not self._is_label_reference(key, axis=axis)\n )\n\n def _is_label_reference(self, key, axis=0) -> bool_t:\n \"\"\"\n Test whether a key is a label reference for a given axis.\n\n To be considered a label reference, `key` must be a string that:\n - (axis=0): Matches a column label\n - (axis=1): Matches an index label\n\n Parameters\n ----------\n key: str\n Potential label name\n axis: int, default 0\n Axis perpendicular to the axis that labels are associated with\n (0 means search for column labels, 1 means search for index labels)\n\n Returns\n -------\n is_label: bool\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n return (\n key is not None\n and is_hashable(key)\n and any(key in self.axes[ax] for ax in other_axes)\n )\n\n def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t:\n \"\"\"\n Test whether a key is a label or level reference for a given axis.\n\n To be considered either a label or a level reference, `key` must be a\n string that:\n - (axis=0): Matches a column label or an index level\n - (axis=1): Matches an index label or a column level\n\n Parameters\n ----------\n key: str\n Potential label or level name\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n is_label_or_level: bool\n \"\"\"\n return self._is_level_reference(key, axis=axis) or self._is_label_reference(\n key, axis=axis\n )\n\n def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None:\n \"\"\"\n Check whether `key` is ambiguous.\n\n By ambiguous, we mean that it matches both a level of the input\n `axis` and a label of the other axis.\n\n Parameters\n ----------\n key: str or object\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns).\n\n Raises\n ------\n ValueError: `key` is ambiguous\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)\n\n if (\n key is not None\n and is_hashable(key)\n and key in self.axes[axis].names\n and any(key in self.axes[ax] for ax in other_axes)\n ):\n\n # Build an informative and grammatical warning\n level_article, level_type = (\n (\"an\", \"index\") if axis == 0 else (\"a\", \"column\")\n )\n\n label_article, label_type = (\n (\"a\", \"column\") if axis == 0 else (\"an\", \"index\")\n )\n\n msg = (\n f\"'{key}' is both {level_article} {level_type} level and \"\n f\"{label_article} {label_type} label, which is ambiguous.\"\n )\n raise ValueError(msg)\n\n def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray:\n \"\"\"\n 
Return a 1-D array of values associated with `key`, a label or level\n from the given `axis`.\n\n Retrieval logic:\n - (axis=0): Return column values if `key` matches a column label.\n Otherwise return index level values if `key` matches an index\n level.\n - (axis=1): Return row values if `key` matches an index label.\n Otherwise return column level values if 'key' matches a column\n level\n\n Parameters\n ----------\n key: str\n Label or level name.\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n values: np.ndarray\n\n Raises\n ------\n KeyError\n if `key` matches neither a label nor a level\n ValueError\n if `key` matches multiple labels\n FutureWarning\n if `key` is ambiguous. This will become an ambiguity error in a\n future version\n \"\"\"\n axis = self._get_axis_number(axis)\n other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]\n\n if self._is_label_reference(key, axis=axis):\n self._check_label_or_level_ambiguity(key, axis=axis)\n values = self.xs(key, axis=other_axes[0])._values\n elif self._is_level_reference(key, axis=axis):\n values = self.axes[axis].get_level_values(key)._values\n else:\n raise KeyError(key)\n\n # Check for duplicates\n if values.ndim > 1:\n\n if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):\n multi_message = (\n \"\\n\"\n \"For a multi-index, the label must be a \"\n \"tuple with elements corresponding to \"\n \"each level.\"\n )\n else:\n multi_message = \"\"\n\n label_axis_name = \"column\" if axis == 0 else \"index\"\n raise ValueError(\n (\n f\"The {label_axis_name} label '{key}' \"\n f\"is not unique.{multi_message}\"\n )\n )\n\n return values\n\n def _drop_labels_or_levels(self, keys, axis: int = 0):\n \"\"\"\n Drop labels and/or levels for the given `axis`.\n\n For each key in `keys`:\n - (axis=0): If key matches a column label then drop the column.\n Otherwise if key matches an index level then drop the level.\n - (axis=1): If key matches an index label then drop the row.\n Otherwise if key matches a column level then drop the level.\n\n Parameters\n ----------\n keys: str or list of str\n labels or levels to drop\n axis: int, default 0\n Axis that levels are associated with (0 for index, 1 for columns)\n\n Returns\n -------\n dropped: DataFrame\n\n Raises\n ------\n ValueError\n if any `keys` match neither a label nor a level\n \"\"\"\n axis = self._get_axis_number(axis)\n\n # Validate keys\n keys = com.maybe_make_list(keys)\n invalid_keys = [\n k for k in keys if not self._is_label_or_level_reference(k, axis=axis)\n ]\n\n if invalid_keys:\n raise ValueError(\n (\n \"The following keys are not valid labels or \"\n f\"levels for axis {axis}: {invalid_keys}\"\n )\n )\n\n # Compute levels and labels to drop\n levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]\n\n labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]\n\n # Perform copy upfront and then use inplace operations below.\n # This ensures that we always perform exactly one copy.\n # ``copy`` and/or ``inplace`` options could be added in the future.\n dropped = self.copy()\n\n if axis == 0:\n # Handle dropping index levels\n if levels_to_drop:\n dropped.reset_index(levels_to_drop, drop=True, inplace=True)\n\n # Handle dropping columns labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=1, inplace=True)\n else:\n # Handle dropping column levels\n if levels_to_drop:\n if isinstance(dropped.columns, MultiIndex):\n # Drop the 
specified levels from the MultiIndex\n dropped.columns = dropped.columns.droplevel(levels_to_drop)\n else:\n # Drop the last level of Index by replacing with\n # a RangeIndex\n dropped.columns = RangeIndex(dropped.columns.size)\n\n # Handle dropping index labels\n if labels_to_drop:\n dropped.drop(labels_to_drop, axis=0, inplace=True)\n\n return dropped\n\n # ----------------------------------------------------------------------\n # Iteration\n\n def __hash__(self):\n raise TypeError(\n f\"{repr(type(self).__name__)} objects are mutable, \"\n f\"thus they cannot be hashed\"\n )\n\n def __iter__(self):\n \"\"\"\n Iterate over info axis.\n\n Returns\n -------\n iterator\n Info axis as iterator.\n \"\"\"\n return iter(self._info_axis)\n\n # can we get a better explanation of this?\n def keys(self):\n \"\"\"\n Get the 'info axis' (see Indexing for more).\n\n This is index for Series, columns for DataFrame.\n\n Returns\n -------\n Index\n Info axis.\n \"\"\"\n return self._info_axis\n\n def items(self):\n \"\"\"Iterate over (label, values) on info axis\n\n This is index for Series and columns for DataFrame.\n\n Returns\n -------\n Generator\n \"\"\"\n for h in self._info_axis:\n yield h, self[h]\n\n @Appender(items.__doc__)\n def iteritems(self):\n return self.items()\n\n def __len__(self) -> int:\n \"\"\"Returns length of info axis\"\"\"\n return len(self._info_axis)\n\n def __contains__(self, key) -> bool_t:\n \"\"\"True if the key is in the info axis\"\"\"\n return key in self._info_axis\n\n @property\n def empty(self) -> bool_t:\n \"\"\"\n Indicator whether DataFrame is empty.\n\n True if DataFrame is entirely empty (no items), meaning any of the\n axes are of length 0.\n\n Returns\n -------\n bool\n If DataFrame is empty, return True, if not return False.\n\n See Also\n --------\n Series.dropna\n DataFrame.dropna\n\n Notes\n -----\n If DataFrame contains only NaNs, it is still not considered empty. See\n the example below.\n\n Examples\n --------\n An example of an actual empty DataFrame. Notice the index is empty:\n\n >>> df_empty = pd.DataFrame({'A' : []})\n >>> df_empty\n Empty DataFrame\n Columns: [A]\n Index: []\n >>> df_empty.empty\n True\n\n If we only have NaNs in our DataFrame, it is not considered empty! We\n will need to drop the NaNs to make the DataFrame empty:\n\n >>> df = pd.DataFrame({'A' : [np.nan]})\n >>> df\n A\n 0 NaN\n >>> df.empty\n False\n >>> df.dropna().empty\n True\n \"\"\"\n return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)\n\n # ----------------------------------------------------------------------\n # Array Interface\n\n # This is also set in IndexOpsMixin\n # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented\n __array_priority__ = 1000\n\n def __array__(self, dtype=None):\n return com.values_from_object(self)\n\n def __array_wrap__(self, result, context=None):\n result = lib.item_from_zerodim(result)\n if is_scalar(result):\n # e.g. 
we get here with np.ptp(series)\n # ptp also requires the item_from_zerodim\n return result\n d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)\n return self._constructor(result, **d).__finalize__(self)\n\n # ideally we would define this to avoid the getattr checks, but\n # is slower\n # @property\n # def __array_interface__(self):\n # \"\"\" provide numpy array interface method \"\"\"\n # values = self.values\n # return dict(typestr=values.dtype.str,shape=values.shape,data=values)\n\n # ----------------------------------------------------------------------\n # Picklability\n\n def __getstate__(self) -> Dict[str, Any]:\n meta = {k: getattr(self, k, None) for k in self._metadata}\n return dict(\n _data=self._data,\n _typ=self._typ,\n _metadata=self._metadata,\n attrs=self.attrs,\n **meta,\n )\n\n def __setstate__(self, state):\n\n if isinstance(state, BlockManager):\n self._data = state\n elif isinstance(state, dict):\n typ = state.get(\"_typ\")\n if typ is not None:\n attrs = state.get(\"_attrs\", {})\n object.__setattr__(self, \"_attrs\", attrs)\n\n # set in the order of internal names\n # to avoid definitional recursion\n # e.g. say fill_value needing _data to be\n # defined\n meta = set(self._internal_names + self._metadata)\n for k in list(meta):\n if k in state:\n v = state[k]\n object.__setattr__(self, k, v)\n\n for k, v in state.items():\n if k not in meta:\n object.__setattr__(self, k, v)\n\n else:\n self._unpickle_series_compat(state)\n elif len(state) == 2:\n self._unpickle_series_compat(state)\n\n self._item_cache = {}\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def __repr__(self) -> str:\n # string representation based upon iterating over self\n # (since, by definition, `PandasContainers` are iterable)\n prepr = f\"[{','.join(map(pprint_thing, self))}]\"\n return f\"{type(self).__name__}({prepr})\"\n\n def _repr_latex_(self):\n \"\"\"\n Returns a LaTeX representation for a particular object.\n Mainly for use with nbconvert (jupyter notebook conversion to pdf).\n \"\"\"\n if config.get_option(\"display.latex.repr\"):\n return self.to_latex()\n else:\n return None\n\n def _repr_data_resource_(self):\n \"\"\"\n Not a real Jupyter special repr method, but we use the same\n naming convention.\n \"\"\"\n if config.get_option(\"display.html.table_schema\"):\n data = self.head(config.get_option(\"display.max_rows\"))\n payload = json.loads(\n data.to_json(orient=\"table\"), object_pairs_hook=collections.OrderedDict\n )\n return payload\n\n # ----------------------------------------------------------------------\n # I/O Methods\n\n _shared_docs[\n \"to_markdown\"\n ] = \"\"\"\n Print %(klass)s in Markdown-friendly format.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n %(klass)s in Markdown-friendly format.\n \"\"\"\n\n _shared_docs[\n \"to_excel\"\n ] = \"\"\"\n Write %(klass)s to an Excel sheet.\n\n To write a single %(klass)s to an Excel .xlsx file it is only necessary to\n specify a target file name. 
To write to multiple sheets it is necessary to\n create an `ExcelWriter` object with a target file name, and specify a sheet\n in the file to write to.\n\n Multiple sheets may be written to by specifying unique `sheet_name`.\n With all data written to the file it is necessary to save the changes.\n Note that creating an `ExcelWriter` object with a file name that already\n exists will result in the contents of the existing file being erased.\n\n Parameters\n ----------\n excel_writer : str or ExcelWriter object\n File path or existing ExcelWriter.\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, optional\n Format string for floating point numbers. For example\n ``float_format=\"%%.2f\"`` will format 0.1234 to 0.12.\n columns : sequence or list of str, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of string is given it is\n assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, optional\n Column label for index column(s) if desired. If not specified, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : int, default 0\n Upper left cell row to dump data frame.\n startcol : int, default 0\n Upper left cell column to dump data frame.\n engine : str, optional\n Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this\n via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n merge_cells : bool, default True\n Write MultiIndex and Hierarchical Rows as merged cells.\n encoding : str, optional\n Encoding of the resulting excel file. Only necessary for xlwt,\n other writers support unicode natively.\n inf_rep : str, default 'inf'\n Representation for infinity (there is no native representation for\n infinity in Excel).\n verbose : bool, default True\n Display more information in the error logs.\n freeze_panes : tuple of int (length 2), optional\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen.\n\n See Also\n --------\n to_csv : Write DataFrame to a comma-separated values (csv) file.\n ExcelWriter : Class for writing DataFrame objects into excel sheets.\n read_excel : Read an Excel file into a pandas DataFrame.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Notes\n -----\n For compatibility with :meth:`~DataFrame.to_csv`,\n to_excel serializes lists and dicts to strings before writing.\n\n Once a workbook has been saved it is not possible write further data\n without rewriting the whole workbook.\n\n Examples\n --------\n\n Create, write to and save a workbook:\n\n >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df1.to_excel(\"output.xlsx\") # doctest: +SKIP\n\n To specify the sheet name:\n\n >>> df1.to_excel(\"output.xlsx\",\n ... sheet_name='Sheet_name_1') # doctest: +SKIP\n\n If you wish to write to more than one sheet in the workbook, it is\n necessary to specify an ExcelWriter object:\n\n >>> df2 = df1.copy()\n >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP\n ... df1.to_excel(writer, sheet_name='Sheet_name_1')\n ... 
df2.to_excel(writer, sheet_name='Sheet_name_2')\n\n ExcelWriter can also be used to append to an existing Excel file:\n\n >>> with pd.ExcelWriter('output.xlsx',\n ... mode='a') as writer: # doctest: +SKIP\n ... df.to_excel(writer, sheet_name='Sheet_name_3')\n\n To set the library that is used to write the Excel file,\n you can pass the `engine` keyword (the default engine is\n automatically chosen depending on the file extension):\n\n >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP\n \"\"\"\n\n @Appender(_shared_docs[\"to_excel\"] % dict(klass=\"object\"))\n def to_excel(\n self,\n excel_writer,\n sheet_name=\"Sheet1\",\n na_rep=\"\",\n float_format=None,\n columns=None,\n header=True,\n index=True,\n index_label=None,\n startrow=0,\n startcol=0,\n engine=None,\n merge_cells=True,\n encoding=None,\n inf_rep=\"inf\",\n verbose=True,\n freeze_panes=None,\n ):\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n df,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def to_json(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n orient: Optional[str] = None,\n date_format: Optional[str] = None,\n double_precision: int = 10,\n force_ascii: bool_t = True,\n date_unit: str = \"ms\",\n default_handler: Optional[Callable[[Any], JSONSerializable]] = None,\n lines: bool_t = False,\n compression: Optional[str] = \"infer\",\n index: bool_t = True,\n indent: Optional[int] = None,\n ) -> Optional[str]:\n \"\"\"\n Convert the object to a JSON string.\n\n Note NaN's and None will be converted to null and datetime objects\n will be converted to UNIX timestamps.\n\n Parameters\n ----------\n path_or_buf : str or file handle, optional\n File path or object. If not specified, the result is returned as\n a string.\n orient : str\n Indication of expected JSON string format.\n\n * Series:\n\n - default is 'index'\n - allowed values are: {'split','records','index','table'}.\n\n * DataFrame:\n\n - default is 'columns'\n - allowed values are: {'split', 'records', 'index', 'columns',\n 'values', 'table'}.\n\n * The format of the JSON string:\n\n - 'split' : dict like {'index' -> [index], 'columns' -> [columns],\n 'data' -> [values]}\n - 'records' : list like [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n - 'columns' : dict like {column -> {index -> value}}\n - 'values' : just the values array\n - 'table' : dict like {'schema': {schema}, 'data': {data}}\n\n Describing the data, where data component is like ``orient='records'``.\n\n .. versionchanged:: 0.20.0\n\n date_format : {None, 'epoch', 'iso'}\n Type of date conversion. 'epoch' = epoch milliseconds,\n 'iso' = ISO8601. The default depends on the `orient`. For\n ``orient='table'``, the default is 'iso'. For all other orients,\n the default is 'epoch'.\n double_precision : int, default 10\n The number of decimal places to use when encoding\n floating point values.\n force_ascii : bool, default True\n Force encoded string to be ASCII.\n date_unit : str, default 'ms' (milliseconds)\n The time unit to encode to, governs timestamp and ISO8601\n precision. 
One of 's', 'ms', 'us', 'ns' for second, millisecond,\n microsecond, and nanosecond respectively.\n default_handler : callable, default None\n Handler to call if object cannot otherwise be converted to a\n suitable format for JSON. Should receive a single argument which is\n the object to convert and return a serialisable object.\n lines : bool, default False\n If 'orient' is 'records' write out line delimited json format. Will\n throw ValueError if incorrect 'orient' since others are not list\n like.\n\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}\n\n A string representing the compression to use in the output file,\n only used when the first argument is a filename. By default, the\n compression is inferred from the filename.\n\n .. versionadded:: 0.21.0\n .. versionchanged:: 0.24.0\n 'infer' option added and set to default\n index : bool, default True\n Whether to include the index values in the JSON string. Not\n including the index (``index=False``) is only supported when\n orient is 'split' or 'table'.\n\n .. versionadded:: 0.23.0\n\n indent : int, optional\n Length of whitespace used to indent each record.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting json format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_json\n\n Notes\n -----\n The behavior of ``indent=0`` varies from the stdlib, which does not\n indent the output but does insert newlines. Currently, ``indent=0``\n and the default ``indent=None`` are equivalent in pandas, though this\n may change in a future release.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n >>> df.to_json(orient='split')\n '{\"columns\":[\"col 1\",\"col 2\"],\n \"index\":[\"row 1\",\"row 2\"],\n \"data\":[[\"a\",\"b\"],[\"c\",\"d\"]]}'\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{\"col 1\":\"a\",\"col 2\":\"b\"},{\"col 1\":\"c\",\"col 2\":\"d\"}]'\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{\"row 1\":{\"col 1\":\"a\",\"col 2\":\"b\"},\"row 2\":{\"col 1\":\"c\",\"col 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:\n\n >>> df.to_json(orient='columns')\n '{\"col 1\":{\"row 1\":\"a\",\"row 2\":\"c\"},\"col 2\":{\"row 1\":\"b\",\"row 2\":\"d\"}}'\n\n Encoding/decoding a Dataframe using ``'values'`` formatted JSON:\n\n >>> df.to_json(orient='values')\n '[[\"a\",\"b\"],[\"c\",\"d\"]]'\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '{\"schema\": {\"fields\": [{\"name\": \"index\", \"type\": \"string\"},\n {\"name\": \"col 1\", \"type\": \"string\"},\n {\"name\": \"col 2\", \"type\": \"string\"}],\n \"primaryKey\": \"index\",\n \"pandas_version\": \"0.20.0\"},\n \"data\": [{\"index\": \"row 1\", \"col 1\": \"a\", \"col 2\": \"b\"},\n {\"index\": \"row 2\", \"col 1\": \"c\", \"col 2\": \"d\"}]}'\n \"\"\"\n\n from pandas.io import json\n\n if date_format is None and orient == \"table\":\n date_format = \"iso\"\n elif date_format is None:\n date_format = \"epoch\"\n\n config.is_nonnegative_int(indent)\n indent = indent or 0\n\n return json.to_json(\n path_or_buf=path_or_buf,\n obj=self,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n force_ascii=force_ascii,\n date_unit=date_unit,\n 
default_handler=default_handler,\n lines=lines,\n compression=compression,\n index=index,\n indent=indent,\n )\n\n def to_hdf(\n self,\n path_or_buf,\n key: str,\n mode: str = \"a\",\n complevel: Optional[int] = None,\n complib: Optional[str] = None,\n append: bool_t = False,\n format: Optional[str] = None,\n index: bool_t = True,\n min_itemsize: Optional[Union[int, Dict[str, int]]] = None,\n nan_rep=None,\n dropna: Optional[bool_t] = None,\n data_columns: Optional[List[str]] = None,\n errors: str = \"strict\",\n encoding: str = \"UTF-8\",\n ):\n \"\"\"\n Write the contained data to an HDF5 file using HDFStore.\n\n Hierarchical Data Format (HDF) is self-describing, allowing an\n application to interpret the structure and contents of a file with\n no outside information. One HDF file can hold a mix of related objects\n which can be accessed as a group or as individual objects.\n\n In order to add another DataFrame or Series to an existing HDF file\n please use append mode and a different a key.\n\n For more information see the :ref:`user guide <io.hdf5>`.\n\n Parameters\n ----------\n path_or_buf : str or pandas.HDFStore\n File path or HDFStore object.\n key : str\n Identifier for the group in the store.\n mode : {'a', 'w', 'r+'}, default 'a'\n Mode to open file:\n\n - 'w': write, a new file is created (an existing file with\n the same name would be deleted).\n - 'a': append, an existing file is opened for reading and\n writing, and if the file does not exist it is created.\n - 'r+': similar to 'a', but the file must already exist.\n complevel : {0-9}, optional\n Specifies a compression level for data.\n A value of 0 disables compression.\n complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'\n Specifies the compression library to be used.\n As of v0.20.2 these additional compressors for Blosc are supported\n (default if no compressor specified: 'blosc:blosclz'):\n {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',\n 'blosc:zlib', 'blosc:zstd'}.\n Specifying a compression library which is not available issues\n a ValueError.\n append : bool, default False\n For Table formats, append the input data to the existing.\n format : {'fixed', 'table', None}, default 'fixed'\n Possible values:\n\n - 'fixed': Fixed format. Fast writing/reading. Not-appendable,\n nor searchable.\n - 'table': Table format. Write as a PyTables Table structure\n which may perform worse but allow more flexible operations\n like searching / selecting subsets of the data.\n - If None, pd.get_option('io.hdf.default_format') is checked,\n followed by fallback to \"fixed\"\n errors : str, default 'strict'\n Specifies how encoding and decoding errors are to be handled.\n See the errors argument for :func:`open` for a full list\n of options.\n encoding : str, default \"UTF-8\"\n min_itemsize : dict or int, optional\n Map column names to minimum string sizes for columns.\n nan_rep : Any, optional\n How to represent null values as str.\n Not allowed with append=True.\n data_columns : list of columns or True, optional\n List of columns to create as indexed data columns for on-disk\n queries, or True to use all columns. By default only the axes\n of the object are indexed. 
See :ref:`io.hdf5-query-data-columns`.\n Applicable only to format='table'.\n\n See Also\n --------\n DataFrame.read_hdf : Read from HDF file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_feather : Write out feather-format for DataFrames.\n DataFrame.to_csv : Write out to a csv file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n ... index=['a', 'b', 'c'])\n >>> df.to_hdf('data.h5', key='df', mode='w')\n\n We can add another object to the same file:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.to_hdf('data.h5', key='s')\n\n Reading from HDF file:\n\n >>> pd.read_hdf('data.h5', 'df')\n A B\n a 1 4\n b 2 5\n c 3 6\n >>> pd.read_hdf('data.h5', 's')\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n Deleting file with data:\n\n >>> import os\n >>> os.remove('data.h5')\n \"\"\"\n from pandas.io import pytables\n\n pytables.to_hdf(\n path_or_buf,\n key,\n self,\n mode=mode,\n complevel=complevel,\n complib=complib,\n append=append,\n format=format,\n index=index,\n min_itemsize=min_itemsize,\n nan_rep=nan_rep,\n dropna=dropna,\n data_columns=data_columns,\n errors=errors,\n encoding=encoding,\n )\n\n def to_sql(\n self,\n name: str,\n con,\n schema=None,\n if_exists: str = \"fail\",\n index: bool_t = True,\n index_label=None,\n chunksize=None,\n dtype=None,\n method=None,\n ) -> None:\n \"\"\"\n Write records stored in a DataFrame to a SQL database.\n\n Databases supported by SQLAlchemy [1]_ are supported. Tables can be\n newly created, appended to, or overwritten.\n\n Parameters\n ----------\n name : str\n Name of SQL table.\n con : sqlalchemy.engine.Engine or sqlite3.Connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. Legacy support is provided for sqlite3.Connection objects. The user\n is responsible for engine disposal and connection closure for the SQLAlchemy\n connectable See `here \\\n <https://docs.sqlalchemy.org/en/13/core/connections.html>`_\n\n schema : str, optional\n Specify the schema (if database flavor supports this). If None, use\n default schema.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists.\n\n * fail: Raise a ValueError.\n * replace: Drop the table before inserting new values.\n * append: Insert new values to the existing table.\n\n index : bool, default True\n Write DataFrame index as a column. Uses `index_label` as the column\n name in the table.\n index_label : str or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, optional\n Specify the number of rows in each batch to be written at a time.\n By default, all rows will be written at once.\n dtype : dict or scalar, optional\n Specifying the datatype for columns. If a dictionary is used, the\n keys should be the column names and the values should be the\n SQLAlchemy types or strings for the sqlite3 legacy mode. 
If a\n scalar is provided, it will be applied to all columns.\n method : {None, 'multi', callable}, optional\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n\n Raises\n ------\n ValueError\n When the table already exists and `if_exists` is 'fail' (the\n default).\n\n See Also\n --------\n read_sql : Read a DataFrame from a table.\n\n Notes\n -----\n Timezone aware datetime columns will be written as\n ``Timestamp with timezone`` type with SQLAlchemy if supported by the\n database. Otherwise, the datetimes will be stored as timezone unaware\n timestamps local to the original timezone.\n\n .. versionadded:: 0.24.0\n\n References\n ----------\n .. [1] http://docs.sqlalchemy.org\n .. [2] https://www.python.org/dev/peps/pep-0249/\n\n Examples\n --------\n\n Create an in-memory SQLite database.\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine('sqlite://', echo=False)\n\n Create a table from scratch with 3 rows.\n\n >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})\n >>> df\n name\n 0 User 1\n 1 User 2\n 2 User 3\n\n >>> df.to_sql('users', con=engine)\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]\n\n >>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})\n >>> df1.to_sql('users', con=engine, if_exists='append')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),\n (0, 'User 4'), (1, 'User 5')]\n\n Overwrite the table with just ``df1``.\n\n >>> df1.to_sql('users', con=engine, if_exists='replace',\n ... index_label='id')\n >>> engine.execute(\"SELECT * FROM users\").fetchall()\n [(0, 'User 4'), (1, 'User 5')]\n\n Specify the dtype (especially useful for integers with missing values).\n Notice that while pandas is forced to store the data as floating point,\n the database supports nullable integers. When fetching the data with\n Python, we get back integer scalars.\n\n >>> df = pd.DataFrame({\"A\": [1, None, 2]})\n >>> df\n A\n 0 1.0\n 1 NaN\n 2 2.0\n\n >>> from sqlalchemy.types import Integer\n >>> df.to_sql('integers', con=engine, index=False,\n ... dtype={\"A\": Integer()})\n\n >>> engine.execute(\"SELECT * FROM integers\").fetchall()\n [(1,), (None,), (2,)]\n \"\"\"\n from pandas.io import sql\n\n sql.to_sql(\n self,\n name,\n con,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=method,\n )\n\n def to_pickle(\n self,\n path,\n compression: Optional[str] = \"infer\",\n protocol: int = pickle.HIGHEST_PROTOCOL,\n ) -> None:\n \"\"\"\n Pickle (serialize) object to file.\n\n Parameters\n ----------\n path : str\n File path where the pickled object will be stored.\n compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \\\n default 'infer'\n A string representing the compression to use in the output file. By\n default, infers from the file extension in specified path.\n protocol : int\n Int which indicates which protocol should be used by the pickler,\n default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible\n values are 0, 1, 2, 3, 4. 
A negative value for the protocol\n parameter is equivalent to setting its value to HIGHEST_PROTOCOL.\n\n .. [1] https://docs.python.org/3/library/pickle.html.\n .. versionadded:: 0.21.0.\n\n See Also\n --------\n read_pickle : Load pickled pandas object (or any object) from file.\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_sql : Write DataFrame to a SQL database.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Examples\n --------\n >>> original_df = pd.DataFrame({\"foo\": range(5), \"bar\": range(5, 10)})\n >>> original_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n >>> original_df.to_pickle(\"./dummy.pkl\")\n\n >>> unpickled_df = pd.read_pickle(\"./dummy.pkl\")\n >>> unpickled_df\n foo bar\n 0 0 5\n 1 1 6\n 2 2 7\n 3 3 8\n 4 4 9\n\n >>> import os\n >>> os.remove(\"./dummy.pkl\")\n \"\"\"\n from pandas.io.pickle import to_pickle\n\n to_pickle(self, path, compression=compression, protocol=protocol)\n\n def to_clipboard(self, excel: bool_t = True, sep: Optional[str] = None, **kwargs):\n r\"\"\"\n Copy object to the system clipboard.\n\n Write a text representation of object to the system clipboard.\n This can be pasted into Excel, for example.\n\n Parameters\n ----------\n excel : bool, default True\n Produce output in a csv format for easy pasting into excel.\n\n - True, use the provided separator for csv pasting.\n - False, write a string representation of the object to the clipboard.\n\n sep : str, default ``'\\t'``\n Field delimiter.\n **kwargs\n These parameters will be passed to DataFrame.to_csv.\n\n See Also\n --------\n DataFrame.to_csv : Write a DataFrame to a comma-separated values\n (csv) file.\n read_clipboard : Read text from clipboard and pass to read_table.\n\n Notes\n -----\n Requirements for your platform.\n\n - Linux : `xclip`, or `xsel` (with `PyQt4` modules)\n - Windows : none\n - OS X : none\n\n Examples\n --------\n Copy the contents of a DataFrame to the clipboard.\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_clipboard(sep=',')\n ... # Wrote the following to the system clipboard:\n ... # ,A,B,C\n ... # 0,1,2,3\n ... # 1,4,5,6\n\n We can omit the the index by passing the keyword `index` and setting\n it to false.\n\n >>> df.to_clipboard(sep=',', index=False)\n ... # Wrote the following to the system clipboard:\n ... # A,B,C\n ... # 1,2,3\n ... # 4,5,6\n \"\"\"\n from pandas.io import clipboards\n\n clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)\n\n def to_xarray(self):\n \"\"\"\n Return an xarray object from the pandas object.\n\n Returns\n -------\n xarray.DataArray or xarray.Dataset\n Data in the pandas structure converted to Dataset if the object is\n a DataFrame, or a DataArray if the object is a Series.\n\n See Also\n --------\n DataFrame.to_hdf : Write DataFrame to an HDF5 file.\n DataFrame.to_parquet : Write a DataFrame to the binary parquet format.\n\n Notes\n -----\n See the `xarray docs <http://xarray.pydata.org/en/stable/>`__\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),\n ... ('parrot', 'bird', 24.0, 2),\n ... ('lion', 'mammal', 80.5, 4),\n ... ('monkey', 'mammal', np.nan, 4)],\n ... columns=['name', 'class', 'max_speed',\n ... 
'num_legs'])\n >>> df\n name class max_speed num_legs\n 0 falcon bird 389.0 2\n 1 parrot bird 24.0 2\n 2 lion mammal 80.5 4\n 3 monkey mammal NaN 4\n\n >>> df.to_xarray()\n <xarray.Dataset>\n Dimensions: (index: 4)\n Coordinates:\n * index (index) int64 0 1 2 3\n Data variables:\n name (index) object 'falcon' 'parrot' 'lion' 'monkey'\n class (index) object 'bird' 'bird' 'mammal' 'mammal'\n max_speed (index) float64 389.0 24.0 80.5 nan\n num_legs (index) int64 2 2 4 4\n\n >>> df['max_speed'].to_xarray()\n <xarray.DataArray 'max_speed' (index: 4)>\n array([389. , 24. , 80.5, nan])\n Coordinates:\n * index (index) int64 0 1 2 3\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',\n ... '2018-01-02', '2018-01-02'])\n >>> df_multiindex = pd.DataFrame({'date': dates,\n ... 'animal': ['falcon', 'parrot',\n ... 'falcon', 'parrot'],\n ... 'speed': [350, 18, 361, 15]})\n >>> df_multiindex = df_multiindex.set_index(['date', 'animal'])\n\n >>> df_multiindex\n speed\n date animal\n 2018-01-01 falcon 350\n parrot 18\n 2018-01-02 falcon 361\n parrot 15\n\n >>> df_multiindex.to_xarray()\n <xarray.Dataset>\n Dimensions: (animal: 2, date: 2)\n Coordinates:\n * date (date) datetime64[ns] 2018-01-01 2018-01-02\n * animal (animal) object 'falcon' 'parrot'\n Data variables:\n speed (date, animal) int64 350 18 361 15\n \"\"\"\n xarray = import_optional_dependency(\"xarray\")\n\n if self.ndim == 1:\n return xarray.DataArray.from_series(self)\n else:\n return xarray.Dataset.from_dataframe(self)\n\n @Substitution(returns=fmt.return_docstring)\n def to_latex(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n bold_rows=False,\n column_format=None,\n longtable=None,\n escape=None,\n encoding=None,\n decimal=\".\",\n multicolumn=None,\n multicolumn_format=None,\n multirow=None,\n caption=None,\n label=None,\n ):\n r\"\"\"\n Render object to a LaTeX tabular, longtable, or nested table/tabular.\n\n Requires ``\\usepackage{booktabs}``. The output can be copy/pasted\n into a main LaTeX document or read from an external file\n with ``\\input{table.tex}``.\n\n .. versionchanged:: 0.20.2\n Added to Series.\n\n .. versionchanged:: 1.0.0\n Added caption and label arguments.\n\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : list of label, optional\n The subset of columns to write. Writes all columns by default.\n col_space : int, optional\n The minimum width of each column.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given,\n it is assumed to be aliases for the column names.\n index : bool, default True\n Write row names (index).\n na_rep : str, default 'NaN'\n Missing data representation.\n formatters : list of functions or dict of {str: function}, optional\n Formatter functions to apply to columns' elements by position or\n name. The result of each function must be a unicode string.\n List must be of length equal to the number of columns.\n float_format : one-parameter function or str, optional, default None\n Formatter for floating point numbers. For example\n ``float_format=\"%%.2f\"`` and ``float_format=\"{:0.2f}\".format`` will\n both result in 0.1234 being formatted as 0.12.\n sparsify : bool, optional\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row. 
By default, the value will be\n read from the config module.\n index_names : bool, default True\n Prints the names of the indexes.\n bold_rows : bool, default False\n Make the row labels bold in the output.\n column_format : str, optional\n The columns format as specified in `LaTeX table format\n <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3\n columns. By default, 'l' will be used for all columns except\n columns of numbers, which default to 'r'.\n longtable : bool, optional\n By default, the value will be read from the pandas config\n module. Use a longtable environment instead of tabular. Requires\n adding a \\usepackage{longtable} to your LaTeX preamble.\n escape : bool, optional\n By default, the value will be read from the pandas config\n module. When set to False prevents from escaping latex special\n characters in column names.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n multicolumn : bool, default True\n Use \\multicolumn to enhance MultiIndex columns.\n The default will be read from the config module.\n multicolumn_format : str, default 'l'\n The alignment for multicolumns, similar to `column_format`\n The default will be read from the config module.\n multirow : bool, default False\n Use \\multirow to enhance MultiIndex rows. Requires adding a\n \\usepackage{multirow} to your LaTeX preamble. Will print\n centered labels (instead of top-aligned) across the contained\n rows, separating groups via clines. The default will be read\n from the pandas config module.\n caption : str, optional\n The LaTeX caption to be placed inside ``\\caption{}`` in the output.\n\n .. versionadded:: 1.0.0\n\n label : str, optional\n The LaTeX label to be placed inside ``\\label{}`` in the output.\n This is used with ``\\ref{}`` in the main ``.tex`` file.\n\n .. versionadded:: 1.0.0\n %(returns)s\n See Also\n --------\n DataFrame.to_string : Render a DataFrame to a console-friendly\n tabular output.\n DataFrame.to_html : Render a DataFrame as an HTML table.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 
'weapon': ['sai', 'bo staff']})\n >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE\n \\begin{tabular}{lll}\n \\toprule\n name & mask & weapon \\\\\n \\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n \\bottomrule\n \\end{tabular}\n \"\"\"\n # Get defaults from the pandas config\n if self.ndim == 1:\n self = self.to_frame()\n if longtable is None:\n longtable = config.get_option(\"display.latex.longtable\")\n if escape is None:\n escape = config.get_option(\"display.latex.escape\")\n if multicolumn is None:\n multicolumn = config.get_option(\"display.latex.multicolumn\")\n if multicolumn_format is None:\n multicolumn_format = config.get_option(\"display.latex.multicolumn_format\")\n if multirow is None:\n multirow = config.get_option(\"display.latex.multirow\")\n\n formatter = DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n )\n return formatter.to_latex(\n buf=buf,\n column_format=column_format,\n longtable=longtable,\n encoding=encoding,\n multicolumn=multicolumn,\n multicolumn_format=multicolumn_format,\n multirow=multirow,\n caption=caption,\n label=label,\n )\n\n def to_csv(\n self,\n path_or_buf: Optional[FilePathOrBuffer] = None,\n sep: str = \",\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Optional[Hashable]]] = None,\n header: Union[bool_t, List[str]] = True,\n index: bool_t = True,\n index_label: Optional[Union[bool_t, str, Sequence[Optional[Hashable]]]] = None,\n mode: str = \"w\",\n encoding: Optional[str] = None,\n compression: Optional[Union[str, Mapping[str, str]]] = \"infer\",\n quoting: Optional[int] = None,\n quotechar: str = '\"',\n line_terminator: Optional[str] = None,\n chunksize: Optional[int] = None,\n date_format: Optional[str] = None,\n doublequote: bool_t = True,\n escapechar: Optional[str] = None,\n decimal: Optional[str] = \".\",\n ) -> Optional[str]:\n r\"\"\"\n Write object to a comma-separated values (csv) file.\n\n .. versionchanged:: 0.24.0\n The order of arguments for Series was changed.\n\n Parameters\n ----------\n path_or_buf : str or file handle, default None\n File path or object, if None is provided the result is returned as\n a string. If a file object is passed it should be opened with\n `newline=''`, disabling universal newlines.\n\n .. versionchanged:: 0.24.0\n\n Was previously named \"path\" for Series.\n\n sep : str, default ','\n String of length 1. Field delimiter for the output file.\n na_rep : str, default ''\n Missing data representation.\n float_format : str, default None\n Format string for floating point numbers.\n columns : sequence, optional\n Columns to write.\n header : bool or list of str, default True\n Write out the column names. If a list of strings is given it is\n assumed to be aliases for the column names.\n\n .. versionchanged:: 0.24.0\n\n Previously defaulted to False for Series.\n\n index : bool, default True\n Write row names (index).\n index_label : str or sequence, or False, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the object uses MultiIndex. If\n False do not print fields for index names. 
Use index_label=False\n for easier importing in R.\n mode : str\n Python write mode, default 'w'.\n encoding : str, optional\n A string representing the encoding to use in the output file,\n defaults to 'utf-8'.\n compression : str or dict, default 'infer'\n If str, represents compression mode. If dict, value at 'method' is\n the compression mode. Compression mode may be any of the following\n possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If\n compression mode is 'infer' and `path_or_buf` is path-like, then\n detect compression mode from the following extensions: '.gz',\n '.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given\n and mode is 'zip' or inferred as 'zip', other entries passed as\n additional compression options.\n\n .. versionchanged:: 1.0.0\n\n May now be a dict with key 'method' as compression mode\n and other entries as additional compression options if\n compression mode is 'zip'.\n\n quoting : optional constant from csv module\n Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`\n then floats are converted to strings and thus csv.QUOTE_NONNUMERIC\n will treat them as non-numeric.\n quotechar : str, default '\\\"'\n String of length 1. Character used to quote fields.\n line_terminator : str, optional\n The newline character or character sequence to use in the output\n file. Defaults to `os.linesep`, which depends on the OS in which\n this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.).\n\n .. versionchanged:: 0.24.0\n chunksize : int or None\n Rows to write at a time.\n date_format : str, default None\n Format string for datetime objects.\n doublequote : bool, default True\n Control quoting of `quotechar` inside a field.\n escapechar : str, default None\n String of length 1. Character used to escape `sep` and `quotechar`\n when appropriate.\n decimal : str, default '.'\n Character recognized as decimal separator. E.g. use ',' for\n European data.\n\n Returns\n -------\n None or str\n If path_or_buf is None, returns the resulting csv format as a\n string. Otherwise returns None.\n\n See Also\n --------\n read_csv : Load a CSV file into a DataFrame.\n to_excel : Write DataFrame to an Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],\n ... 'mask': ['red', 'purple'],\n ... 'weapon': ['sai', 'bo staff']})\n >>> df.to_csv(index=False)\n 'name,mask,weapon\\nRaphael,red,sai\\nDonatello,purple,bo staff\\n'\n\n # create 'out.zip' containing 'out.csv'\n >>> compression_opts = dict(method='zip',\n ... archive_name='out.csv') # doctest: +SKIP\n\n >>> df.to_csv('out.zip', index=False,\n ... 
compression=compression_opts) # doctest: +SKIP\n \"\"\"\n\n df = self if isinstance(self, ABCDataFrame) else self.to_frame()\n\n from pandas.io.formats.csvs import CSVFormatter\n\n formatter = CSVFormatter(\n df,\n path_or_buf,\n line_terminator=line_terminator,\n sep=sep,\n encoding=encoding,\n compression=compression,\n quoting=quoting,\n na_rep=na_rep,\n float_format=float_format,\n cols=columns,\n header=header,\n index=index,\n index_label=index_label,\n mode=mode,\n chunksize=chunksize,\n quotechar=quotechar,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n decimal=decimal,\n )\n formatter.save()\n\n if path_or_buf is None:\n return formatter.path_or_buf.getvalue()\n\n return None\n\n # ----------------------------------------------------------------------\n # Fancy Indexing\n\n @classmethod\n def _create_indexer(cls, name: str, indexer) -> None:\n \"\"\"Create an indexer like _name in the class.\"\"\"\n if getattr(cls, name, None) is None:\n _indexer = functools.partial(indexer, name)\n setattr(cls, name, property(_indexer, doc=indexer.__doc__))\n\n # ----------------------------------------------------------------------\n # Lookup Caching\n\n def _set_as_cached(self, item, cacher) -> None:\n \"\"\"Set the _cacher attribute on the calling object with a weakref to\n cacher.\n \"\"\"\n self._cacher = (item, weakref.ref(cacher))\n\n def _reset_cacher(self) -> None:\n \"\"\"Reset the cacher.\"\"\"\n if hasattr(self, \"_cacher\"):\n del self._cacher\n\n def _maybe_cache_changed(self, item, value) -> None:\n \"\"\"The object has called back to us saying maybe it has changed.\n \"\"\"\n self._data.set(item, value)\n\n @property\n def _is_cached(self) -> bool_t:\n \"\"\"Return boolean indicating if self is cached or not.\"\"\"\n return getattr(self, \"_cacher\", None) is not None\n\n def _get_cacher(self):\n \"\"\"return my cacher or None\"\"\"\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n cacher = cacher[1]()\n return cacher\n\n def _maybe_update_cacher(\n self, clear: bool_t = False, verify_is_copy: bool_t = True\n ) -> None:\n \"\"\"\n See if we need to update our parent cacher if clear, then clear our\n cache.\n\n Parameters\n ----------\n clear : bool, default False\n Clear the item cache.\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n\n cacher = getattr(self, \"_cacher\", None)\n if cacher is not None:\n ref = cacher[1]()\n\n # we are trying to reference a dead referant, hence\n # a copy\n if ref is None:\n del self._cacher\n else:\n # Note: we need to call ref._maybe_cache_changed even in the\n # case where it will raise. (Uh, not clear why)\n try:\n ref._maybe_cache_changed(cacher[0], self)\n except AssertionError:\n # ref._data.setitem can raise\n # AssertionError because of shape mismatch\n pass\n\n if verify_is_copy:\n self._check_setitem_copy(stacklevel=5, t=\"referant\")\n\n if clear:\n self._clear_item_cache()\n\n def _clear_item_cache(self) -> None:\n self._item_cache.clear()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def take(self, indices, axis=0, is_copy: bool_t = True, **kwargs):\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. 
We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n is_copy : bool, default True\n Whether to return a copy of the original object or not.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3])\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2])\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n \"\"\"\n nv.validate_take(tuple(), kwargs)\n\n self._consolidate_inplace()\n\n new_data = self._data.take(\n indices, axis=self._get_block_manager_axis(axis), verify=True\n )\n result = self._constructor(new_data).__finalize__(self)\n\n # Maybe set copy if we didn't actually change the index.\n if is_copy:\n if not result._get_axis(axis).equals(self._get_axis(axis)):\n result._set_is_copy(self)\n\n return result\n\n def xs(self, key, axis=0, level=None, drop_level: bool_t = True):\n \"\"\"\n Return cross-section from the Series/DataFrame.\n\n This method takes a `key` argument to select data at a particular\n level of a MultiIndex.\n\n Parameters\n ----------\n key : label or tuple of label\n Label contained in the index, or partially in a MultiIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to retrieve cross-section on.\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. 
Levels can be referred by label or position.\n drop_level : bool, default True\n If False, returns object with same levels as self.\n\n Returns\n -------\n Series or DataFrame\n Cross-section from the original Series or DataFrame\n corresponding to the selected index levels.\n\n See Also\n --------\n DataFrame.loc : Access a group of rows and columns\n by label(s) or a boolean array.\n DataFrame.iloc : Purely integer-location based indexing\n for selection by position.\n\n Notes\n -----\n `xs` can not be used to set values.\n\n MultiIndex Slicers is a generic way to get/set values on\n any level or levels.\n It is a superset of `xs` functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`.\n\n Examples\n --------\n >>> d = {'num_legs': [4, 4, 2, 2],\n ... 'num_wings': [0, 0, 2, 2],\n ... 'class': ['mammal', 'mammal', 'mammal', 'bird'],\n ... 'animal': ['cat', 'dog', 'bat', 'penguin'],\n ... 'locomotion': ['walks', 'walks', 'flies', 'walks']}\n >>> df = pd.DataFrame(data=d)\n >>> df = df.set_index(['class', 'animal', 'locomotion'])\n >>> df\n num_legs num_wings\n class animal locomotion\n mammal cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n bird penguin walks 2 2\n\n Get values at specified index\n\n >>> df.xs('mammal')\n num_legs num_wings\n animal locomotion\n cat walks 4 0\n dog walks 4 0\n bat flies 2 2\n\n Get values at several indexes\n\n >>> df.xs(('mammal', 'dog'))\n num_legs num_wings\n locomotion\n walks 4 0\n\n Get values at specified index and level\n\n >>> df.xs('cat', level=1)\n num_legs num_wings\n class locomotion\n mammal walks 4 0\n\n Get values at several indexes and levels\n\n >>> df.xs(('bird', 'walks'),\n ... level=[0, 'locomotion'])\n num_legs num_wings\n animal\n penguin 2 2\n\n Get values at specified column and axis\n\n >>> df.xs('num_wings', axis=1)\n class animal locomotion\n mammal cat walks 0\n dog walks 0\n bat flies 2\n bird penguin walks 2\n Name: num_wings, dtype: int64\n \"\"\"\n axis = self._get_axis_number(axis)\n labels = self._get_axis(axis)\n if level is not None:\n loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)\n\n # create the tuple of the indexer\n _indexer = [slice(None)] * self.ndim\n _indexer[axis] = loc\n indexer = tuple(_indexer)\n\n result = self.iloc[indexer]\n setattr(result, result._get_axis_name(axis), new_ax)\n return result\n\n if axis == 1:\n return self[key]\n\n self._consolidate_inplace()\n\n index = self.index\n if isinstance(index, MultiIndex):\n loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)\n else:\n loc = self.index.get_loc(key)\n\n if isinstance(loc, np.ndarray):\n if loc.dtype == np.bool_:\n (inds,) = loc.nonzero()\n return self.take(inds, axis=axis)\n else:\n return self.take(loc, axis=axis)\n\n if not is_scalar(loc):\n new_index = self.index[loc]\n\n if is_scalar(loc):\n new_values = self._data.fast_xs(loc)\n\n # may need to box a datelike-scalar\n #\n # if we encounter an array-like and we only have 1 dim\n # that means that their are list/ndarrays inside the Series!\n # so just return them (GH 6394)\n if not is_list_like(new_values) or self.ndim == 1:\n return com.maybe_box_datetimelike(new_values)\n\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[loc],\n dtype=new_values.dtype,\n )\n\n else:\n result = self.iloc[loc]\n result.index = new_index\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n result._set_is_copy(self, copy=not result._is_view)\n return result\n\n _xs: Callable = 
xs\n\n def __getitem__(self, item):\n raise AbstractMethodError(self)\n\n def _get_item_cache(self, item):\n \"\"\"Return the cached item, item represents a label indexer.\"\"\"\n cache = self._item_cache\n res = cache.get(item)\n if res is None:\n values = self._data.get(item)\n res = self._box_item_values(item, values)\n cache[item] = res\n res._set_as_cached(item, self)\n\n # for a chain\n res._is_copy = self._is_copy\n return res\n\n def _iget_item_cache(self, item):\n \"\"\"Return the cached item, item represents a positional indexer.\"\"\"\n ax = self._info_axis\n if ax.is_unique:\n lower = self._get_item_cache(ax[item])\n else:\n lower = self.take(item, axis=self._info_axis_number)\n return lower\n\n def _box_item_values(self, key, values):\n raise AbstractMethodError(self)\n\n def _slice(self, slobj: slice, axis=0, kind=None):\n \"\"\"\n Construct a slice of this container.\n\n kind parameter is maintained for compatibility with Series slicing.\n \"\"\"\n axis = self._get_block_manager_axis(axis)\n result = self._constructor(self._data.get_slice(slobj, axis=axis))\n result = result.__finalize__(self)\n\n # this could be a view\n # but only in a single-dtyped view sliceable case\n is_copy = axis != 0 or result._is_view\n result._set_is_copy(self, copy=is_copy)\n return result\n\n def _set_item(self, key, value) -> None:\n self._data.set(key, value)\n self._clear_item_cache()\n\n def _set_is_copy(self, ref=None, copy: bool_t = True) -> None:\n if not copy:\n self._is_copy = None\n else:\n if ref is not None:\n self._is_copy = weakref.ref(ref)\n else:\n self._is_copy = None\n\n def _check_is_chained_assignment_possible(self) -> bool_t:\n \"\"\"\n Check if we are a view, have a cacher, and are of mixed type.\n If so, then force a setitem_copy check.\n\n Should be called just near setting a value\n\n Will return a boolean if it we are a view and are cached, but a\n single-dtype meaning that the cacher should be updated following\n setting.\n \"\"\"\n if self._is_view and self._is_cached:\n ref = self._get_cacher()\n if ref is not None and ref._is_mixed_type:\n self._check_setitem_copy(stacklevel=4, t=\"referant\", force=True)\n return True\n elif self._is_copy:\n self._check_setitem_copy(stacklevel=4, t=\"referant\")\n return False\n\n def _check_setitem_copy(self, stacklevel=4, t=\"setting\", force=False):\n \"\"\"\n\n Parameters\n ----------\n stacklevel : int, default 4\n the level to show of the stack when the error is output\n t : str, the type of setting error\n force : bool, default False\n If True, then force showing an error.\n\n validate if we are doing a setitem on a chained copy.\n\n If you call this function, be sure to set the stacklevel such that the\n user will see the error *at the level of setting*\n\n It is technically possible to figure out that we are setting on\n a copy even WITH a multi-dtyped pandas object. In other words, some\n blocks may be views while other are not. Currently _is_view will ALWAYS\n return False for multi-blocks to avoid having to handle this case.\n\n df = DataFrame(np.arange(0,9), columns=['count'])\n df['group'] = 'b'\n\n # This technically need not raise SettingWithCopy if both are view\n # (which is not # generally guaranteed but is usually True. 
However,\n # this is in general not a good practice and we recommend using .loc.\n df.iloc[0:5]['group'] = 'a'\n\n \"\"\"\n\n # return early if the check is not needed\n if not (force or self._is_copy):\n return\n\n value = config.get_option(\"mode.chained_assignment\")\n if value is None:\n return\n\n # see if the copy is not actually referred; if so, then dissolve\n # the copy weakref\n if self._is_copy is not None and not isinstance(self._is_copy, str):\n r = self._is_copy()\n if not gc.get_referents(r) or r.shape == self.shape:\n self._is_copy = None\n return\n\n # a custom message\n if isinstance(self._is_copy, str):\n t = self._is_copy\n\n elif t == \"referant\":\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame\\n\\n\"\n \"See the caveats in the documentation: \"\n \"http://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n else:\n t = (\n \"\\n\"\n \"A value is trying to be set on a copy of a slice from a \"\n \"DataFrame.\\n\"\n \"Try using .loc[row_indexer,col_indexer] = value \"\n \"instead\\n\\nSee the caveats in the documentation: \"\n \"http://pandas.pydata.org/pandas-docs/stable/user_guide/\"\n \"indexing.html#returning-a-view-versus-a-copy\"\n )\n\n if value == \"raise\":\n raise com.SettingWithCopyError(t)\n elif value == \"warn\":\n warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)\n\n def __delitem__(self, key):\n \"\"\"\n Delete item\n \"\"\"\n deleted = False\n\n maybe_shortcut = False\n if self.ndim == 2 and isinstance(self.columns, MultiIndex):\n try:\n maybe_shortcut = key not in self.columns._engine\n except TypeError:\n pass\n\n if maybe_shortcut:\n # Allow shorthand to delete all columns whose first len(key)\n # elements match key:\n if not isinstance(key, tuple):\n key = (key,)\n for col in self.columns:\n if isinstance(col, tuple) and col[: len(key)] == key:\n del self[col]\n deleted = True\n if not deleted:\n # If the above loop ran and didn't delete anything because\n # there was no match, this call should raise the appropriate\n # exception:\n self._data.delete(key)\n\n # delete from the caches\n try:\n del self._item_cache[key]\n except KeyError:\n pass\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def get(self, key, default=None):\n \"\"\"\n Get item from object for given key (ex: DataFrame column).\n\n Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n \"\"\"\n try:\n return self[key]\n except (KeyError, ValueError, IndexError):\n return default\n\n @property\n def _is_view(self):\n \"\"\"Return boolean indicating if self is view of another array \"\"\"\n return self._data.is_view\n\n def reindex_like(\n self,\n other,\n method: Optional[str] = None,\n copy: bool_t = True,\n limit=None,\n tolerance=None,\n ):\n \"\"\"\n Return an object with matching indices as other object.\n\n Conform the object to the same index on all axes. Optional\n filling logic, placing NaN in locations having no value\n in the previous index. 
A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : Object of the same data type\n Its row and column indices are used to define the new indices\n of this object.\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid observation forward to next\n valid\n * backfill / bfill: use next valid observation to fill gap\n * nearest: use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n limit : int, default None\n Maximum number of consecutive labels to fill for inexact matches.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n Series or DataFrame\n Same type as caller, but with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = pd.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... 
'2014-02-15']))\n\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1)\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN NaN\n 2014-02-15 35.1 NaN medium\n \"\"\"\n d = other._construct_axes_dict(\n axes=self._AXIS_ORDERS,\n method=method,\n copy=copy,\n limit=limit,\n tolerance=tolerance,\n )\n\n return self.reindex(**d)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace: bool_t = False,\n errors: str = \"raise\",\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n if labels is not None:\n if index is not None or columns is not None:\n raise ValueError(\"Cannot specify both 'labels' and 'index'/'columns'\")\n axis_name = self._get_axis_name(axis)\n axes = {axis_name: labels}\n elif index is not None or columns is not None:\n axes, _ = self._construct_axes_from_arguments((index, columns), {})\n else:\n raise ValueError(\n \"Need to specify at least one of 'labels', 'index' or 'columns'\"\n )\n\n obj = self\n\n for axis, labels in axes.items():\n if labels is not None:\n obj = obj._drop_axis(labels, axis, level=level, errors=errors)\n\n if inplace:\n self._update_inplace(obj)\n else:\n return obj\n\n def _drop_axis(self, labels, axis, level=None, errors: str = \"raise\"):\n \"\"\"\n Drop labels from specified axis. Used in the ``drop`` method\n internally.\n\n Parameters\n ----------\n labels : single label or list-like\n axis : int or axis name\n level : int or level name, default None\n For MultiIndex\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n \"\"\"\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n axis = self._get_axis(axis)\n\n if axis.is_unique:\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n new_axis = axis.drop(labels, level=level, errors=errors)\n else:\n new_axis = axis.drop(labels, errors=errors)\n result = self.reindex(**{axis_name: new_axis})\n\n # Case for non-unique axis\n else:\n labels = ensure_object(com.index_labels_to_array(labels))\n if level is not None:\n if not isinstance(axis, MultiIndex):\n raise AssertionError(\"axis must be a MultiIndex\")\n indexer = ~axis.get_level_values(level).isin(labels)\n\n # GH 18561 MultiIndex.drop should raise if label is absent\n if errors == \"raise\" and indexer.all():\n raise KeyError(f\"{labels} not found in axis\")\n else:\n indexer = ~axis.isin(labels)\n # Check if label doesn't exist along axis\n labels_missing = (axis.get_indexer_for(labels) == -1).any()\n if errors == \"raise\" and labels_missing:\n raise KeyError(f\"{labels} not found in axis\")\n\n slicer = [slice(None)] * self.ndim\n slicer[self._get_axis_number(axis_name)] = indexer\n\n result = self.loc[tuple(slicer)]\n\n return result\n\n def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None:\n \"\"\"\n Replace self internals with result.\n\n Parameters\n ----------\n verify_is_copy : bool, default True\n Provide is_copy checks.\n \"\"\"\n # NOTE: This does *not* call __finalize__ and that's an explicit\n # decision that we may revisit in the future.\n\n self._reset_cache()\n self._clear_item_cache()\n self._data = getattr(result, \"_data\", result)\n self._maybe_update_cacher(verify_is_copy=verify_is_copy)\n\n def add_prefix(self, prefix: str):\n \"\"\"\n Prefix labels with 
string `prefix`.\n\n For Series, the row labels are prefixed.\n For DataFrame, the column labels are prefixed.\n\n Parameters\n ----------\n prefix : str\n The string to add before each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_suffix: Suffix row labels with string `suffix`.\n DataFrame.add_suffix: Suffix column labels with string `suffix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_prefix('item_')\n item_0 1\n item_1 2\n item_2 3\n item_3 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_prefix('col_')\n col_A col_B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{prefix}{}\".format, prefix=prefix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper)\n\n def add_suffix(self, suffix: str):\n \"\"\"\n Suffix labels with string `suffix`.\n\n For Series, the row labels are suffixed.\n For DataFrame, the column labels are suffixed.\n\n Parameters\n ----------\n suffix : str\n The string to add after each label.\n\n Returns\n -------\n Series or DataFrame\n New Series or DataFrame with updated labels.\n\n See Also\n --------\n Series.add_prefix: Prefix row labels with string `prefix`.\n DataFrame.add_prefix: Prefix column labels with string `prefix`.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.add_suffix('_item')\n 0_item 1\n 1_item 2\n 2_item 3\n 3_item 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n\n >>> df.add_suffix('_col')\n A_col B_col\n 0 1 3\n 1 2 4\n 2 3 5\n 3 4 6\n \"\"\"\n f = functools.partial(\"{}{suffix}\".format, suffix=suffix)\n\n mapper = {self._info_axis_name: f}\n return self.rename(**mapper)\n\n def sort_values(\n self,\n by=None,\n axis=0,\n ascending=True,\n inplace: bool_t = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n ignore_index: bool_t = False,\n ):\n \"\"\"\n Sort by the values along either axis.\n\n Parameters\n ----------%(optional_by)s\n axis : %(axes_single_arg)s, default 0\n Axis to be sorted.\n ascending : bool or list of bool, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders. If this is a list of bools, must match the length of\n the by.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the\n end.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted values if inplace=False, None otherwise.\n\n Examples\n --------\n >>> df = pd.DataFrame({\n ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],\n ... 'col2': [2, 1, 9, 8, 7, 4],\n ... 'col3': [0, 1, 9, 4, 2, 3],\n ... 
})\n >>> df\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n\n Sort by col1\n\n >>> df.sort_values(by=['col1'])\n col1 col2 col3\n 0 A 2 0\n 1 A 1 1\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort by multiple columns\n\n >>> df.sort_values(by=['col1', 'col2'])\n col1 col2 col3\n 1 A 1 1\n 0 A 2 0\n 2 B 9 9\n 5 C 4 3\n 4 D 7 2\n 3 NaN 8 4\n\n Sort Descending\n\n >>> df.sort_values(by='col1', ascending=False)\n col1 col2 col3\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n 3 NaN 8 4\n\n Putting NAs first\n\n >>> df.sort_values(by='col1', ascending=False, na_position='first')\n col1 col2 col3\n 3 NaN 8 4\n 4 D 7 2\n 5 C 4 3\n 2 B 9 9\n 0 A 2 0\n 1 A 1 1\n \"\"\"\n raise AbstractMethodError(self)\n\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending: bool_t = True,\n inplace: bool_t = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool_t = True,\n ):\n \"\"\"\n Sort object by labels (along an axis).\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool, default True\n Sort ascending vs. descending.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n\n Returns\n -------\n sorted_obj : DataFrame or None\n DataFrame with sorted index if inplace=False, None otherwise.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = self._get_axis_number(axis)\n axis_name = self._get_axis_name(axis)\n labels = self._get_axis(axis)\n\n if level is not None:\n raise NotImplementedError(\"level is not implemented\")\n if inplace:\n raise NotImplementedError(\"inplace is not implemented\")\n\n sort_index = labels.argsort()\n if not ascending:\n sort_index = sort_index[::-1]\n\n new_axis = labels.take(sort_index)\n return self.reindex(**{axis_name: new_axis})\n\n def reindex(self, *args, **kwargs):\n \"\"\"\n Conform %(klass)s to new index with optional filling logic.\n\n Places NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n %(optional_labels)s\n %(axes)s : array-like, optional\n New labels / index to conform to, should be specified using\n keywords. 
Preferably an Index object to avoid duplicating data.\n %(optional_axis)s\n method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}\n Method to use for filling holes in reindexed DataFrame.\n Please note: this is only applicable to DataFrames/Series with a\n monotonically increasing/decreasing index.\n\n * None (default): don't fill gaps\n * pad / ffill: Propagate last valid observation forward to next\n valid.\n * backfill / bfill: Use next valid observation to fill gap.\n * nearest: Use nearest valid observations to fill gap.\n\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n level : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n limit : int, default None\n Maximum number of consecutive elements to forward or backward fill.\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations most\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n Tolerance may be a scalar value, which applies the same tolerance\n to all values, or list-like, which applies variable tolerance per\n element. List-like includes list, tuple, array, Series, and must be\n the same size as the index and its dtype must exactly match the\n index's type.\n\n .. versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n %(klass)s with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']\n >>> df = pd.DataFrame({'http_status': [200, 200, 404, 404, 301],\n ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index)\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',\n ... 'Chrome']\n >>> df.reindex(new_index)\n http_status response_time\n Safari 404.0 0.07\n Iceweasel NaN NaN\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Chrome 200.0 0.02\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``. 
Because the index is not monotonically\n increasing or decreasing, we cannot use arguments to the keyword\n ``method`` to fill the ``NaN`` values.\n\n >>> df.reindex(new_index, fill_value=0)\n http_status response_time\n Safari 404 0.07\n Iceweasel 0 0.00\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Chrome 200 0.02\n\n >>> df.reindex(new_index, fill_value='missing')\n http_status response_time\n Safari 404 0.07\n Iceweasel missing missing\n Comodo Dragon missing missing\n IE10 404 0.08\n Chrome 200 0.02\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=['http_status', 'user_agent'])\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n Or we can use \"axis-style\" keyword arguments\n\n >>> df.reindex(['http_status', 'user_agent'], axis=\"columns\")\n http_status user_agent\n Firefox 200 NaN\n Chrome 200 NaN\n Safari 404 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')\n >>> df2 = pd.DataFrame({\"prices\": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')\n >>> df2.reindex(date_index2)\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n The index entries that did not have a value in the original data frame\n (for example, '2009-12-29') are by default filled with ``NaN``.\n If desired, we can fill in the missing values using one of several\n options.\n\n For example, to back-propagate the last valid value to fill the ``NaN``\n values, pass ``bfill`` as an argument to the ``method`` keyword.\n\n >>> df2.reindex(date_index2, method='bfill')\n prices\n 2009-12-29 100.0\n 2009-12-30 100.0\n 2009-12-31 100.0\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n\n Please note that the ``NaN`` value present in the original dataframe\n (at index value 2010-01-03) will not be filled by any of the\n value propagation schemes. This is because filling while reindexing\n does not look at dataframe values, but only compares the original and\n desired indexes. 
If you do want to fill in the ``NaN`` values present\n in the original dataframe, use the ``fillna()`` method.\n\n See the :ref:`user guide <basics.reindexing>` for more.\n \"\"\"\n # TODO: Decide if we care about having different examples for different\n # kinds\n\n # construct the args\n axes, kwargs = self._construct_axes_from_arguments(args, kwargs)\n method = missing.clean_reindex_fill_method(kwargs.pop(\"method\", None))\n level = kwargs.pop(\"level\", None)\n copy = kwargs.pop(\"copy\", True)\n limit = kwargs.pop(\"limit\", None)\n tolerance = kwargs.pop(\"tolerance\", None)\n fill_value = kwargs.pop(\"fill_value\", None)\n\n # Series.reindex doesn't use / need the axis kwarg\n # We pop and ignore it here, to make writing Series/Frame generic code\n # easier\n kwargs.pop(\"axis\", None)\n\n if kwargs:\n raise TypeError(\n \"reindex() got an unexpected keyword \"\n f'argument \"{list(kwargs.keys())[0]}\"'\n )\n\n self._consolidate_inplace()\n\n # if all axes that are requested to reindex are equal, then only copy\n # if indicated must have index names equal here as well as values\n if all(\n self._get_axis(axis).identical(ax)\n for axis, ax in axes.items()\n if ax is not None\n ):\n if copy:\n return self.copy()\n return self\n\n # check if we are a multi reindex\n if self._needs_reindex_multi(axes, method, level):\n return self._reindex_multi(axes, copy, fill_value)\n\n # perform the reindex on the axes\n return self._reindex_axes(\n axes, level, limit, tolerance, method, fill_value, copy\n ).__finalize__(self)\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):\n \"\"\"Perform the reindex for all the axes.\"\"\"\n obj = self\n for a in self._AXIS_ORDERS:\n labels = axes[a]\n if labels is None:\n continue\n\n ax = self._get_axis(a)\n new_index, indexer = ax.reindex(\n labels, level=level, limit=limit, tolerance=tolerance, method=method\n )\n\n axis = self._get_axis_number(a)\n obj = obj._reindex_with_indexers(\n {axis: [new_index, indexer]},\n fill_value=fill_value,\n copy=copy,\n allow_dups=False,\n )\n\n return obj\n\n def _needs_reindex_multi(self, axes, method, level) -> bool_t:\n \"\"\"Check if we do need a multi reindex.\"\"\"\n return (\n (com.count_not_none(*axes.values()) == self._AXIS_LEN)\n and method is None\n and level is None\n and not self._is_mixed_type\n )\n\n def _reindex_multi(self, axes, copy, fill_value):\n raise AbstractMethodError(self)\n\n def _reindex_with_indexers(\n self,\n reindexers,\n fill_value=None,\n copy: bool_t = False,\n allow_dups: bool_t = False,\n ):\n \"\"\"allow_dups indicates an internal call here \"\"\"\n\n # reindex doing multiple operations on different axes if indicated\n new_data = self._data\n for axis in sorted(reindexers.keys()):\n index, indexer = reindexers[axis]\n baxis = self._get_block_manager_axis(axis)\n\n if index is None:\n continue\n\n index = ensure_index(index)\n if indexer is not None:\n indexer = ensure_int64(indexer)\n\n # TODO: speed up on homogeneous DataFrame objects\n new_data = new_data.reindex_indexer(\n index,\n indexer,\n axis=baxis,\n fill_value=fill_value,\n allow_dups=allow_dups,\n copy=copy,\n )\n\n if copy and new_data is self._data:\n new_data = new_data.copy()\n\n return self._constructor(new_data).__finalize__(self)\n\n def filter(\n self,\n items=None,\n like: Optional[str] = None,\n regex: Optional[str] = None,\n axis=None,\n ):\n \"\"\"\n Subset the dataframe rows or columns according to the specified index labels.\n\n Note that this routine does not filter a 
dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : str\n Keep labels from axis for which \"like in label == True\".\n regex : str (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n The axis to filter on, expressed either as an index (int)\n or axis name (str). By default this is the info axis,\n 'index' for Series, 'columns' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=['mouse', 'rabbit'],\n ... columns=['one', 'two', 'three'])\n\n >>> # select columns by name\n >>> df.filter(items=['one', 'three'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex='e$', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing 'bbi'\n >>> df.filter(like='bbi', axis=0)\n one two three\n rabbit 4 5 6\n \"\"\"\n nkw = com.count_not_none(items, like, regex)\n if nkw > 1:\n raise TypeError(\n \"Keyword arguments `items`, `like`, or `regex` \"\n \"are mutually exclusive\"\n )\n\n if axis is None:\n axis = self._info_axis_name\n labels = self._get_axis(axis)\n\n if items is not None:\n name = self._get_axis_name(axis)\n return self.reindex(**{name: [r for r in items if r in labels]})\n elif like:\n\n def f(x):\n return like in ensure_str(x)\n\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n elif regex:\n\n def f(x):\n return matcher.search(ensure_str(x)) is not None\n\n matcher = re.compile(regex)\n values = labels.map(f)\n return self.loc(axis=axis)[values]\n else:\n raise TypeError(\"Must pass either `items`, `like`, or `regex`\")\n\n def head(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the first `n` rows.\n\n This function returns the first `n` rows for the object based\n on position. It is useful for quickly testing if your object\n has the right type of data in it.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n obj_head : same type as caller\n The first `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.tail: Returns the last `n` rows.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the first 5 lines\n\n >>> df.head()\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n\n Viewing the first `n` lines (three in this case)\n\n >>> df.head(3)\n animal\n 0 alligator\n 1 bee\n 2 falcon\n \"\"\"\n\n return self.iloc[:n]\n\n def tail(self: FrameOrSeries, n: int = 5) -> FrameOrSeries:\n \"\"\"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. 
It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail()\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3)\n animal\n 6 shark\n 7 whale\n 8 zebra\n \"\"\"\n\n if n == 0:\n return self.iloc[0:0]\n return self.iloc[-n:]\n\n def sample(\n self,\n n=None,\n frac=None,\n replace=False,\n weights=None,\n random_state=None,\n axis=None,\n ):\n \"\"\"\n Return a random sample of items from an axis of object.\n\n You can use `random_state` for reproducibility.\n\n Parameters\n ----------\n n : int, optional\n Number of items from axis to return. Cannot be used with `frac`.\n Default = 1 if `frac` = None.\n frac : float, optional\n Fraction of axis items to return. Cannot be used with `n`.\n replace : bool, default False\n Allow or disallow sampling of the same row more than once.\n weights : str or ndarray-like, optional\n Default 'None' results in equal probability weighting.\n If passed a Series, will align with target object on index. Index\n values in weights not found in sampled object will be ignored and\n index values in sampled object not in weights will be assigned\n weights of zero.\n If called on a DataFrame, will accept the name of a column\n when axis = 0.\n Unless weights are a Series, weights must be same length as axis\n being sampled.\n If weights do not sum to 1, they will be normalized to sum to 1.\n Missing values in the weights column will be treated as zero.\n Infinite values not allowed.\n random_state : int or numpy.random.RandomState, optional\n Seed for the random number generator (if int), or numpy RandomState\n object.\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default None\n Axis to sample. Accepts axis number or name. Default is stat axis\n for given data type (0 for Series and DataFrames).\n\n Returns\n -------\n Series or DataFrame\n A new object of same type as caller containing `n` items randomly\n sampled from the caller object.\n\n See Also\n --------\n numpy.random.choice: Generates a random sample from a given 1-D numpy\n array.\n\n Notes\n -----\n If `frac` > 1, `replacement` should be set to `True`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],\n ... 'num_wings': [2, 0, 0, 0],\n ... 'num_specimen_seen': [10, 2, 1, 8]},\n ... 
index=['falcon', 'dog', 'spider', 'fish'])\n >>> df\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n dog 4 0 2\n spider 8 0 1\n fish 0 0 8\n\n Extract 3 random elements from the ``Series`` ``df['num_legs']``:\n Note that we use `random_state` to ensure the reproducibility of\n the examples.\n\n >>> df['num_legs'].sample(n=3, random_state=1)\n fish 0\n spider 8\n falcon 2\n Name: num_legs, dtype: int64\n\n A random 50% sample of the ``DataFrame`` with replacement:\n\n >>> df.sample(frac=0.5, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n\n An upsample sample of the ``DataFrame`` with replacement:\n Note that `replace` parameter has to be `True` for `frac` parameter > 1.\n\n >>> df.sample(frac=2, replace=True, random_state=1)\n num_legs num_wings num_specimen_seen\n dog 4 0 2\n fish 0 0 8\n falcon 2 2 10\n falcon 2 2 10\n fish 0 0 8\n dog 4 0 2\n fish 0 0 8\n dog 4 0 2\n\n Using a DataFrame column as weights. Rows with larger value in the\n `num_specimen_seen` column are more likely to be sampled.\n\n >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)\n num_legs num_wings num_specimen_seen\n falcon 2 2 10\n fish 0 0 8\n \"\"\"\n\n if axis is None:\n axis = self._stat_axis_number\n\n axis = self._get_axis_number(axis)\n axis_length = self.shape[axis]\n\n # Process random_state argument\n rs = com.random_state(random_state)\n\n # Check weights for compliance\n if weights is not None:\n\n # If a series, align with frame\n if isinstance(weights, ABCSeries):\n weights = weights.reindex(self.axes[axis])\n\n # Strings acceptable if a dataframe and axis = 0\n if isinstance(weights, str):\n if isinstance(self, ABCDataFrame):\n if axis == 0:\n try:\n weights = self[weights]\n except KeyError:\n raise KeyError(\n \"String passed to weights not a valid column\"\n )\n else:\n raise ValueError(\n \"Strings can only be passed to \"\n \"weights when sampling from rows on \"\n \"a DataFrame\"\n )\n else:\n raise ValueError(\n \"Strings cannot be passed as weights \"\n \"when sampling from a Series.\"\n )\n\n weights = pd.Series(weights, dtype=\"float64\")\n\n if len(weights) != axis_length:\n raise ValueError(\n \"Weights and axis to be sampled must be of same length\"\n )\n\n if (weights == np.inf).any() or (weights == -np.inf).any():\n raise ValueError(\"weight vector may not include `inf` values\")\n\n if (weights < 0).any():\n raise ValueError(\"weight vector many not include negative values\")\n\n # If has nan, set to zero.\n weights = weights.fillna(0)\n\n # Renormalize if don't sum to 1\n if weights.sum() != 1:\n if weights.sum() != 0:\n weights = weights / weights.sum()\n else:\n raise ValueError(\"Invalid weights: weights sum to zero\")\n\n weights = weights.values\n\n # If no frac or n, default to n=1.\n if n is None and frac is None:\n n = 1\n elif frac is not None and frac > 1 and not replace:\n raise ValueError(\n \"Replace has to be set to `True` when \"\n \"upsampling the population `frac` > 1.\"\n )\n elif n is not None and frac is None and n % 1 != 0:\n raise ValueError(\"Only integers accepted as `n` values\")\n elif n is None and frac is not None:\n n = int(round(frac * axis_length))\n elif n is not None and frac is not None:\n raise ValueError(\"Please enter a value for `frac` OR `n`, not both\")\n\n # Check for negative sizes\n if n < 0:\n raise ValueError(\n \"A negative number of rows requested. 
Please provide positive value.\"\n )\n\n locs = rs.choice(axis_length, size=n, replace=replace, p=weights)\n return self.take(locs, axis=axis, is_copy=False)\n\n _shared_docs[\n \"pipe\"\n ] = r\"\"\"\n Apply func(self, \\*args, \\*\\*kwargs).\n\n Parameters\n ----------\n func : function\n Function to apply to the %(klass)s.\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the %(klass)s.\n args : iterable, optional\n Positional arguments passed into ``func``.\n kwargs : mapping, optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n See Also\n --------\n DataFrame.apply\n DataFrame.applymap\n Series.map\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n Series, DataFrames or GroupBy objects. Instead of writing\n\n >>> f(g(h(df), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (df.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n \"\"\"\n\n @Appender(_shared_docs[\"pipe\"] % _shared_doc_kwargs)\n def pipe(self, func, *args, **kwargs):\n return com.pipe(self, func, *args, **kwargs)\n\n _shared_docs[\"aggregate\"] = dedent(\n \"\"\"\n Aggregate using one or more operations over the specified axis.\n %(versionadded)s\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n scalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n\n Return scalar, Series or DataFrame.\n %(see_also)s\n Notes\n -----\n `agg` is an alias for `aggregate`. Use the alias.\n\n A passed user-defined-function will be passed a Series for evaluation.\n %(examples)s\"\"\"\n )\n\n _shared_docs[\n \"transform\"\n ] = \"\"\"\n Call ``func`` on self producing a %(klass)s with transformed values.\n\n Produced %(klass)s will have same axis length as self.\n\n Parameters\n ----------\n func : function, str, list or dict\n Function to use for transforming the data. If a function, must either\n work when passed a %(klass)s or when passed to %(klass)s.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.exp. 
'sqrt']``\n - dict of axis labels -> functions, function names or list of such.\n %(axis)s\n *args\n Positional arguments to pass to `func`.\n **kwargs\n Keyword arguments to pass to `func`.\n\n Returns\n -------\n %(klass)s\n A %(klass)s that must have the same length as self.\n\n Raises\n ------\n ValueError : If the returned %(klass)s has a different length than self.\n\n See Also\n --------\n %(klass)s.agg : Only perform aggregating type operations.\n %(klass)s.apply : Invoke function on a %(klass)s.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})\n >>> df\n A B\n 0 0 1\n 1 1 2\n 2 2 3\n >>> df.transform(lambda x: x + 1)\n A B\n 0 1 2\n 1 2 3\n 2 3 4\n\n Even though the resulting %(klass)s must have the same length as the\n input %(klass)s, it is possible to provide several input functions:\n\n >>> s = pd.Series(range(3))\n >>> s\n 0 0\n 1 1\n 2 2\n dtype: int64\n >>> s.transform([np.sqrt, np.exp])\n sqrt exp\n 0 0.000000 1.000000\n 1 1.000000 2.718282\n 2 1.414214 7.389056\n \"\"\"\n\n # ----------------------------------------------------------------------\n # Attribute access\n\n def __finalize__(\n self: FrameOrSeries, other, method=None, **kwargs\n ) -> FrameOrSeries:\n \"\"\"\n Propagate metadata from other to self.\n\n Parameters\n ----------\n other : the object from which to get the attributes that we are going\n to propagate\n method : optional, a passed method name ; possibly to take different\n types of propagation actions based on this\n\n \"\"\"\n if isinstance(other, NDFrame):\n for name in other.attrs:\n self.attrs[name] = other.attrs[name]\n # For subclasses using _metadata.\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other, name, None))\n return self\n\n def __getattr__(self, name: str):\n \"\"\"After regular attribute access, try looking up the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n\n # Note: obj.x will always call obj.__getattribute__('x') prior to\n # calling obj.__getattr__('x').\n\n if (\n name in self._internal_names_set\n or name in self._metadata\n or name in self._accessors\n ):\n return object.__getattribute__(self, name)\n else:\n if self._info_axis._can_hold_identifiers_and_holds_name(name):\n return self[name]\n return object.__getattribute__(self, name)\n\n def __setattr__(self, name: str, value) -> None:\n \"\"\"After regular attribute access, try setting the name\n This allows simpler access to columns for interactive use.\n \"\"\"\n\n # first try regular attribute access via __getattribute__, so that\n # e.g. 
``obj.x`` and ``obj.x = 4`` will always reference/modify\n # the same attribute.\n\n try:\n object.__getattribute__(self, name)\n return object.__setattr__(self, name, value)\n except AttributeError:\n pass\n\n # if this fails, go on to more involved attribute setting\n # (note that this matches __getattr__, above).\n if name in self._internal_names_set:\n object.__setattr__(self, name, value)\n elif name in self._metadata:\n object.__setattr__(self, name, value)\n else:\n try:\n existing = getattr(self, name)\n if isinstance(existing, Index):\n object.__setattr__(self, name, value)\n elif name in self._info_axis:\n self[name] = value\n else:\n object.__setattr__(self, name, value)\n except (AttributeError, TypeError):\n if isinstance(self, ABCDataFrame) and (is_list_like(value)):\n warnings.warn(\n \"Pandas doesn't allow columns to be \"\n \"created via a new attribute name - see \"\n \"https://pandas.pydata.org/pandas-docs/\"\n \"stable/indexing.html#attribute-access\",\n stacklevel=2,\n )\n object.__setattr__(self, name, value)\n\n def _dir_additions(self):\n \"\"\" add the string-like attributes from the info_axis.\n If info_axis is a MultiIndex, it's first level values are used.\n \"\"\"\n additions = {\n c\n for c in self._info_axis.unique(level=0)[:100]\n if isinstance(c, str) and c.isidentifier()\n }\n return super()._dir_additions().union(additions)\n\n # ----------------------------------------------------------------------\n # Consolidation of internals\n\n def _protect_consolidate(self, f):\n \"\"\"Consolidate _data -- if the blocks have changed, then clear the\n cache\n \"\"\"\n blocks_before = len(self._data.blocks)\n result = f()\n if len(self._data.blocks) != blocks_before:\n self._clear_item_cache()\n return result\n\n def _consolidate_inplace(self) -> None:\n \"\"\"Consolidate data in place and return None\"\"\"\n\n def f():\n self._data = self._data.consolidate()\n\n self._protect_consolidate(f)\n\n def _consolidate(self, inplace: bool_t = False):\n \"\"\"\n Compute NDFrame with \"consolidated\" internals (data of each dtype\n grouped together in a single ndarray).\n\n Parameters\n ----------\n inplace : bool, default False\n If False return new object, otherwise modify existing object.\n\n Returns\n -------\n consolidated : same type as caller\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if inplace:\n self._consolidate_inplace()\n else:\n f = lambda: self._data.consolidate()\n cons_data = self._protect_consolidate(f)\n return self._constructor(cons_data).__finalize__(self)\n\n @property\n def _is_mixed_type(self):\n f = lambda: self._data.is_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_numeric_mixed_type(self):\n f = lambda: self._data.is_numeric_mixed_type\n return self._protect_consolidate(f)\n\n @property\n def _is_datelike_mixed_type(self):\n f = lambda: self._data.is_datelike_mixed_type\n return self._protect_consolidate(f)\n\n def _check_inplace_setting(self, value) -> bool_t:\n \"\"\" check whether we allow in-place setting with this type of value \"\"\"\n\n if self._is_mixed_type:\n if not self._is_numeric_mixed_type:\n\n # allow an actual np.nan thru\n if is_float(value) and np.isnan(value):\n return True\n\n raise TypeError(\n \"Cannot do inplace boolean setting on \"\n \"mixed-types with a non np.nan value\"\n )\n\n return True\n\n def _get_numeric_data(self):\n return self._constructor(self._data.get_numeric_data()).__finalize__(self)\n\n def _get_bool_data(self):\n return 
self._constructor(self._data.get_bool_data()).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Internal Interface Methods\n\n @property\n def values(self):\n \"\"\"\n Return a Numpy representation of the DataFrame.\n\n .. warning::\n\n We recommend using :meth:`DataFrame.to_numpy` instead.\n\n Only the values in the DataFrame will be returned, the axes labels\n will be removed.\n\n Returns\n -------\n numpy.ndarray\n The values of the DataFrame.\n\n See Also\n --------\n DataFrame.to_numpy : Recommended alternative to this method.\n DataFrame.index : Retrieve the index labels.\n DataFrame.columns : Retrieving the column names.\n\n Notes\n -----\n The dtype will be a lower-common-denominator dtype (implicit\n upcasting); that is to say if the dtypes (even of numeric types)\n are mixed, the one that accommodates all will be chosen. Use this\n with care if you are not dealing with the blocks.\n\n e.g. If the dtypes are float16 and float32, dtype will be upcast to\n float32. If dtypes are int32 and uint8, dtype will be upcast to\n int32. By :func:`numpy.find_common_type` convention, mixing int64\n and uint64 will result in a float64 dtype.\n\n Examples\n --------\n A DataFrame where all columns are the same type (e.g., int64) results\n in an array of the same type.\n\n >>> df = pd.DataFrame({'age': [ 3, 29],\n ... 'height': [94, 170],\n ... 'weight': [31, 115]})\n >>> df\n age height weight\n 0 3 94 31\n 1 29 170 115\n >>> df.dtypes\n age int64\n height int64\n weight int64\n dtype: object\n >>> df.values\n array([[ 3, 94, 31],\n [ 29, 170, 115]], dtype=int64)\n\n A DataFrame with mixed type columns(e.g., str/object, int64, float32)\n results in an ndarray of the broadest type that accommodates these\n mixed types (e.g., object).\n\n >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),\n ... ('lion', 80.5, 1),\n ... ('monkey', np.nan, None)],\n ... columns=('name', 'max_speed', 'rank'))\n >>> df2.dtypes\n name object\n max_speed float64\n rank object\n dtype: object\n >>> df2.values\n array([['parrot', 24.0, 'second'],\n ['lion', 80.5, 1],\n ['monkey', nan, None]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n return self._data.as_array(transpose=self._AXIS_REVERSED)\n\n @property\n def _values(self):\n \"\"\"internal implementation\"\"\"\n return self.values\n\n @property\n def _get_values(self):\n # compat\n return self.values\n\n def _internal_get_values(self):\n \"\"\"\n Return an ndarray after converting sparse values to dense.\n\n This is the same as ``.values`` for non-sparse data. For sparse\n data contained in a `SparseArray`, the data are first\n converted to a dense representation.\n\n Returns\n -------\n numpy.ndarray\n Numpy representation of DataFrame.\n\n See Also\n --------\n values : Numpy representation of DataFrame.\n SparseArray : Container for sparse data.\n \"\"\"\n return self.values\n\n @property\n def dtypes(self):\n \"\"\"\n Return the dtypes in the DataFrame.\n\n This returns a Series with the data type of each column.\n The result's index is the original DataFrame's columns. Columns\n with mixed types are stored with the ``object`` dtype. See\n :ref:`the User Guide <basics.dtypes>` for more.\n\n Returns\n -------\n pandas.Series\n The data type of each column.\n\n Examples\n --------\n >>> df = pd.DataFrame({'float': [1.0],\n ... 'int': [1],\n ... 'datetime': [pd.Timestamp('20180310')],\n ... 
'string': ['foo']})\n >>> df.dtypes\n float float64\n int int64\n datetime datetime64[ns]\n string object\n dtype: object\n \"\"\"\n from pandas import Series\n\n return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)\n\n def _to_dict_of_blocks(self, copy: bool_t = True):\n \"\"\"\n Return a dict of dtype -> Constructor Types that\n each is a homogeneous dtype.\n\n Internal ONLY\n \"\"\"\n return {\n k: self._constructor(v).__finalize__(self)\n for k, v, in self._data.to_dict(copy=copy).items()\n }\n\n def astype(self, dtype, copy: bool_t = True, errors: str = \"raise\"):\n \"\"\"\n Cast a pandas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type, or dict of column name -> data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type. Alternatively, use {col: dtype, ...}, where col is a\n column label and dtype is a numpy.dtype or Python type to cast one\n or more of the DataFrame's columns to column-specific types.\n copy : bool, default True\n Return a copy when ``copy=True`` (be very careful setting\n ``copy=False`` as changes to values then may propagate to other\n pandas objects).\n errors : {'raise', 'ignore'}, default 'raise'\n Control raising of exceptions on invalid data for provided dtype.\n\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to a numeric type.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n Create a DataFrame:\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n Cast all columns to int32:\n\n >>> df.astype('int32').dtypes\n col1 int32\n col2 int32\n dtype: object\n\n Cast col1 to int32 using a dictionary:\n\n >>> df.astype({'col1': 'int32'}).dtypes\n col1 int32\n col2 int64\n dtype: object\n\n Create a series:\n\n >>> ser = pd.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n Convert to categorical type:\n\n >>> ser.astype('category')\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [1, 2]\n\n Convert to ordered categorical type with custom ordering:\n\n >>> cat_dtype = pd.api.types.CategoricalDtype(\n ... categories=[2, 1], ordered=True)\n >>> ser.astype(cat_dtype)\n 0 1\n 1 2\n dtype: category\n Categories (2, int64): [2 < 1]\n\n Note that using ``copy=False`` and changing data on a new\n pandas object may propagate changes:\n\n >>> s1 = pd.Series([1, 2])\n >>> s2 = s1.astype('int64', copy=False)\n >>> s2[0] = 10\n >>> s1 # note that s1[0] has changed too\n 0 10\n 1 2\n dtype: int64\n \"\"\"\n if is_dict_like(dtype):\n if self.ndim == 1: # i.e. 
Series\n if len(dtype) > 1 or self.name not in dtype:\n raise KeyError(\n \"Only the Series name can be used for \"\n \"the key in Series dtype mappings.\"\n )\n new_type = dtype[self.name]\n return self.astype(new_type, copy, errors)\n\n for col_name in dtype.keys():\n if col_name not in self:\n raise KeyError(\n \"Only a column name can be used for the \"\n \"key in a dtype mappings argument.\"\n )\n results = []\n for col_name, col in self.items():\n if col_name in dtype:\n results.append(\n col.astype(dtype=dtype[col_name], copy=copy, errors=errors)\n )\n else:\n results.append(col.copy() if copy else col)\n\n elif is_extension_array_dtype(dtype) and self.ndim > 1:\n # GH 18099/22869: columnwise conversion to extension dtype\n # GH 24704: use iloc to handle duplicate column names\n results = [\n self.iloc[:, i].astype(dtype, copy=copy)\n for i in range(len(self.columns))\n ]\n\n else:\n # else, only a single dtype is given\n new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors)\n return self._constructor(new_data).__finalize__(self)\n\n # GH 19920: retain column metadata after concat\n result = pd.concat(results, axis=1, copy=False)\n result.columns = self.columns\n return result\n\n def copy(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n \"\"\"\n Make a copy of this object's indices and data.\n\n When ``deep=True`` (default), a new object will be created with a\n copy of the calling object's data and indices. Modifications to\n the data or indices of the copy will not be reflected in the\n original object (see notes below).\n\n When ``deep=False``, a new object will be created without copying\n the calling object's data or index (only references to the data\n and index are copied). Any changes to the data of the original\n will be reflected in the shallow copy (and vice versa).\n\n Parameters\n ----------\n deep : bool, default True\n Make a deep copy, including a copy of the data and the indices.\n With ``deep=False`` neither the indices nor the data are copied.\n\n Returns\n -------\n copy : Series or DataFrame\n Object type matches caller.\n\n Notes\n -----\n When ``deep=True``, data is copied but actual Python objects\n will not be copied recursively, only the reference to the object.\n This is in contrast to `copy.deepcopy` in the Standard Library,\n which recursively copies object data (see examples below).\n\n While ``Index`` objects are copied when ``deep=True``, the underlying\n numpy array is not copied for performance reasons. 
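A minimal sketch of the copy semantics spelled out in these notes (assuming a small Series ``s``; with ``deep=False`` the underlying data buffer is shared, while the default ``deep=True`` makes an independent copy):

    import pandas as pd

    s = pd.Series([1, 2], index=['a', 'b'])
    shallow = s.copy(deep=False)   # shares data with s
    deep = s.copy()                # owns its own copy of the data

    s.iloc[0] = 99
    assert shallow.iloc[0] == 99   # change is visible through the shallow copy
    assert deep.iloc[0] == 1       # deep copy is unaffected
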
Since ``Index`` is\n immutable, the underlying data can be safely shared and a copy\n is not needed.\n\n Examples\n --------\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> s\n a 1\n b 2\n dtype: int64\n\n >>> s_copy = s.copy()\n >>> s_copy\n a 1\n b 2\n dtype: int64\n\n **Shallow copy versus default (deep) copy:**\n\n >>> s = pd.Series([1, 2], index=[\"a\", \"b\"])\n >>> deep = s.copy()\n >>> shallow = s.copy(deep=False)\n\n Shallow copy shares data and index with original.\n\n >>> s is shallow\n False\n >>> s.values is shallow.values and s.index is shallow.index\n True\n\n Deep copy has own copy of data and index.\n\n >>> s is deep\n False\n >>> s.values is deep.values or s.index is deep.index\n False\n\n Updates to the data shared by shallow copy and original is reflected\n in both; deep copy remains unchanged.\n\n >>> s[0] = 3\n >>> shallow[1] = 4\n >>> s\n a 3\n b 4\n dtype: int64\n >>> shallow\n a 3\n b 4\n dtype: int64\n >>> deep\n a 1\n b 2\n dtype: int64\n\n Note that when copying an object containing Python objects, a deep copy\n will copy the data, but will not do so recursively. Updating a nested\n data object will be reflected in the deep copy.\n\n >>> s = pd.Series([[1, 2], [3, 4]])\n >>> deep = s.copy()\n >>> s[0][0] = 10\n >>> s\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n >>> deep\n 0 [10, 2]\n 1 [3, 4]\n dtype: object\n \"\"\"\n data = self._data.copy(deep=deep)\n return self._constructor(data).__finalize__(self)\n\n def __copy__(self: FrameOrSeries, deep: bool_t = True) -> FrameOrSeries:\n return self.copy(deep=deep)\n\n def __deepcopy__(self: FrameOrSeries, memo=None) -> FrameOrSeries:\n \"\"\"\n Parameters\n ----------\n memo, default None\n Standard signature. Unused\n \"\"\"\n return self.copy(deep=True)\n\n def _convert(\n self: FrameOrSeries,\n datetime: bool_t = False,\n numeric: bool_t = False,\n timedelta: bool_t = False,\n coerce: bool_t = False,\n copy: bool_t = True,\n ) -> FrameOrSeries:\n \"\"\"\n Attempt to infer better dtype for object columns\n\n Parameters\n ----------\n datetime : bool, default False\n If True, convert to date where possible.\n numeric : bool, default False\n If True, attempt to convert to numbers (including strings), with\n unconvertible values becoming NaN.\n timedelta : bool, default False\n If True, convert to timedelta where possible.\n coerce : bool, default False\n If True, force conversion with unconvertible values converted to\n nulls (NaN or NaT).\n copy : bool, default True\n If True, return a copy even if no copy is necessary (e.g. no\n conversion was done). Note: This is meant for internal use, and\n should not be confused with inplace.\n\n Returns\n -------\n converted : same as input object\n \"\"\"\n validate_bool_kwarg(datetime, \"datetime\")\n validate_bool_kwarg(numeric, \"numeric\")\n validate_bool_kwarg(timedelta, \"timedelta\")\n validate_bool_kwarg(coerce, \"coerce\")\n validate_bool_kwarg(copy, \"copy\")\n return self._constructor(\n self._data.convert(\n datetime=datetime,\n numeric=numeric,\n timedelta=timedelta,\n coerce=coerce,\n copy=copy,\n )\n ).__finalize__(self)\n\n def infer_objects(self):\n \"\"\"\n Attempt to infer better dtypes for object columns.\n\n Attempts soft conversion of object-dtyped\n columns, leaving non-object and unconvertible\n columns unchanged. The inference rules are the\n same as during normal Series/DataFrame construction.\n\n .. 
versionadded:: 0.21.0\n\n Returns\n -------\n converted : same type as input object\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n to_numeric : Convert argument to numeric type.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"a\", 1, 2, 3]})\n >>> df = df.iloc[1:]\n >>> df\n A\n 1 1\n 2 2\n 3 3\n\n >>> df.dtypes\n A object\n dtype: object\n\n >>> df.infer_objects().dtypes\n A int64\n dtype: object\n \"\"\"\n # numeric=False necessary to only soft convert;\n # python objects will still be converted to\n # native numpy numeric types\n return self._constructor(\n self._data.convert(\n datetime=True, numeric=False, timedelta=True, coerce=False, copy=True\n )\n ).__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Filling NA's\n\n def fillna(\n self: FrameOrSeries,\n value=None,\n method=None,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series, or DataFrame\n Value to use to fill holes (e.g. 0), alternately a\n dict/Series/DataFrame of values specifying which value to use for\n each index (for a Series) or column (for a DataFrame). Values not\n in the dict/Series/DataFrame will not be filled. This value cannot\n be a list.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use next valid observation to fill gap.\n axis : %(axes_single_arg)s\n Axis along which to fill missing values.\n inplace : bool, default False\n If True, fill in-place. Note: this will modify any\n other views on this object (e.g., a no-copy slice for a column in a\n DataFrame).\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. Must be greater than 0 if not None.\n downcast : dict, default is None\n A dict of item->dtype of what to downcast if possible,\n or the string 'infer' which will try to downcast to an appropriate\n equal type (e.g. float64 to int64 if possible).\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n\n See Also\n --------\n interpolate : Fill NaN values using interpolation.\n reindex : Conform object to new index.\n asfreq : Convert TimeSeries to specified frequency.\n\n Examples\n --------\n >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],\n ... [3, 4, np.nan, 1],\n ... [np.nan, np.nan, np.nan, 5],\n ... [np.nan, 3, np.nan, 4]],\n ... 
columns=list('ABCD'))\n >>> df\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 NaN 4\n\n Replace all NaN elements with 0s.\n\n >>> df.fillna(0)\n A B C D\n 0 0.0 2.0 0.0 0\n 1 3.0 4.0 0.0 1\n 2 0.0 0.0 0.0 5\n 3 0.0 3.0 0.0 4\n\n We can also propagate non-null values forward or backward.\n\n >>> df.fillna(method='ffill')\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 NaN 4\n\n Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,\n 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> df.fillna(value=values)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 2.0 1\n 2 0.0 1.0 2.0 5\n 3 0.0 3.0 2.0 4\n\n Only replace the first NaN element.\n\n >>> df.fillna(value=values, limit=1)\n A B C D\n 0 0.0 2.0 2.0 0\n 1 3.0 4.0 NaN 1\n 2 NaN 1.0 NaN 5\n 3 NaN 3.0 NaN 4\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n value, method = validate_fillna_kwargs(value, method)\n\n self._consolidate_inplace()\n\n # set the default here, so functions examining the signaure\n # can detect if something was set (e.g. in groupby) (GH9221)\n if axis is None:\n axis = 0\n axis = self._get_axis_number(axis)\n\n if value is None:\n\n if self._is_mixed_type and axis == 1:\n if inplace:\n raise NotImplementedError()\n result = self.T.fillna(method=method, limit=limit).T\n\n # need to downcast here because of all of the transposes\n result._data = result._data.downcast()\n\n return result\n\n new_data = self._data.interpolate(\n method=method,\n axis=axis,\n limit=limit,\n inplace=inplace,\n coerce=True,\n downcast=downcast,\n )\n else:\n if len(self._get_axis(axis)) == 0:\n return self\n\n if self.ndim == 1:\n if isinstance(value, (dict, ABCSeries)):\n value = create_series_with_explicit_dtype(\n value, dtype_if_empty=object\n )\n elif not is_list_like(value):\n pass\n else:\n raise TypeError(\n '\"value\" parameter must be a scalar, dict '\n \"or Series, but you passed a \"\n f'\"{type(value).__name__}\"'\n )\n\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n elif isinstance(value, (dict, ABCSeries)):\n if axis == 1:\n raise NotImplementedError(\n \"Currently only can fill \"\n \"with dict/Series column \"\n \"by column\"\n )\n\n result = self if inplace else self.copy()\n for k, v in value.items():\n if k not in result:\n continue\n obj = result[k]\n obj.fillna(v, limit=limit, inplace=True, downcast=downcast)\n return result if not inplace else None\n\n elif not is_list_like(value):\n new_data = self._data.fillna(\n value=value, limit=limit, inplace=inplace, downcast=downcast\n )\n elif isinstance(value, ABCDataFrame) and self.ndim == 2:\n new_data = self.where(self.notna(), value)\n else:\n raise ValueError(f\"invalid fill value with a {type(value)}\")\n\n if inplace:\n self._update_inplace(new_data)\n return None\n else:\n return self._constructor(new_data).__finalize__(self)\n\n def ffill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"ffill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n def bfill(\n self: FrameOrSeries,\n axis=None,\n inplace: bool_t = False,\n limit=None,\n downcast=None,\n ) -> Optional[FrameOrSeries]:\n \"\"\"\n Synonym 
for :meth:`DataFrame.fillna` with ``method='bfill'``.\n\n Returns\n -------\n %(klass)s or None\n Object with missing values filled or None if ``inplace=True``.\n \"\"\"\n return self.fillna(\n method=\"bfill\", axis=axis, inplace=inplace, limit=limit, downcast=downcast\n )\n\n _shared_docs[\n \"replace\"\n ] = \"\"\"\n Replace values given in `to_replace` with `value`.\n\n Values of the %(klass)s are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way the `value`\n parameter should be `None`.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{'a': {'b': np.nan}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The `value`\n parameter should be ``None`` to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n inplace : bool, default False\n If True, in place. Note: this will modify any\n other views on this object (e.g. 
a column from a DataFrame).\n Returns the caller if this is True.\n limit : int, default None\n Maximum size gap to forward or backward fill.\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. If this is ``True`` then `to_replace` *must* be a\n string. Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n method : {'pad', 'ffill', 'bfill', `None`}\n The method to use when for replacement, when `to_replace` is a\n scalar, list or tuple and `value` is ``None``.\n\n .. versionchanged:: 0.23.0\n Added to DataFrame.\n\n Returns\n -------\n %(klass)s\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n TypeError\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n %(klass)s.fillna : Fill NA values.\n %(klass)s.where : Replace values based on boolean condition.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.replace(0, 5)\n 0 5\n 1 1\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n >>> s.replace([1, 2], method='bfill')\n 0 0\n 1 3\n 2 3\n 3 3\n 4 4\n dtype: int64\n\n **dict-like `to_replace`**\n\n >>> df.replace({0: 10, 1: 100})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': 0, 'B': 5}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({'A': {0: 100, 4: 400}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Note that when replacing multiple ``bool`` or ``datetime64`` objects,\n the data types in the `to_replace` parameter must match the data\n type of the value being replaced:\n\n >>> df = pd.DataFrame({'A': [True, False, True],\n ... 'B': [False, True, False]})\n >>> df.replace({'a string': 'new value', True: False}) # raises\n Traceback (most recent call last):\n ...\n TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'\n\n This raises a ``TypeError`` because one of the ``dict`` keys is not of\n the correct type for replacement.\n\n Compare the behavior of ``s.replace({'a': None})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({'a': None})`` is equivalent to\n ``s.replace(to_replace={'a': None}, value=None, method=None)``:\n\n >>> s.replace({'a': None})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n When ``value=None`` and `to_replace` is a scalar, list or\n tuple, `replace` uses the method parameter (default 'pad') to do the\n replacement. So this is why the 'a' values are being replaced by 10\n in rows 1 and 2 and 'b' in row 4 in this case.\n The command ``s.replace('a', None)`` is actually equivalent to\n ``s.replace(to_replace='a', value=None, method='pad')``:\n\n >>> s.replace('a', None)\n 0 10\n 1 10\n 2 10\n 3 b\n 4 b\n dtype: object\n \"\"\"\n\n @Appender(_shared_docs[\"replace\"] % _shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not is_bool(regex) and to_replace is not None:\n raise AssertionError(\"'to_replace' must be 'None' if 'regex' is not a bool\")\n\n self._consolidate_inplace()\n\n if value is None:\n # passing a single value that is scalar like\n # when value is None (GH5319), for compat\n if not is_dict_like(to_replace) and not is_dict_like(regex):\n to_replace = [to_replace]\n\n if isinstance(to_replace, (tuple, list)):\n if isinstance(self, ABCDataFrame):\n return self.apply(\n _single_replace, args=(to_replace, method, inplace, limit)\n )\n return _single_replace(self, to_replace, method, inplace, limit)\n\n if not is_dict_like(to_replace):\n if not is_dict_like(regex):\n raise TypeError(\n 'If \"to_replace\" and \"value\" are both None'\n ' and \"to_replace\" is not a list, then '\n \"regex must be a mapping\"\n )\n to_replace = regex\n regex = True\n\n items = list(to_replace.items())\n keys, values = zip(*items) if items else ([], [])\n\n are_mappings = [is_dict_like(v) for v in values]\n\n if any(are_mappings):\n if not all(are_mappings):\n raise TypeError(\n \"If a nested mapping is passed, all values\"\n \" of the top level mapping must be \"\n \"mappings\"\n )\n # passed a nested dict/Series\n to_rep_dict = {}\n value_dict = 
{}\n\n for k, v in items:\n keys, values = list(zip(*v.items())) or ([], [])\n\n to_rep_dict[k] = list(keys)\n value_dict[k] = list(values)\n\n to_replace, value = to_rep_dict, value_dict\n else:\n to_replace, value = keys, values\n\n return self.replace(\n to_replace, value, inplace=inplace, limit=limit, regex=regex\n )\n else:\n\n # need a non-zero len on all axes\n if not self.size:\n return self\n\n new_data = self._data\n if is_dict_like(to_replace):\n if is_dict_like(value): # {'A' : NA} -> {'A' : 0}\n res = self if inplace else self.copy()\n for c, src in to_replace.items():\n if c in value and c in self:\n # object conversion is handled in\n # series.replace which is called recursively\n res[c] = res[c].replace(\n to_replace=src,\n value=value[c],\n inplace=False,\n regex=regex,\n )\n return None if inplace else res\n\n # {'A': NA} -> 0\n elif not is_list_like(value):\n keys = [(k, src) for k, src in to_replace.items() if k in self]\n keys_len = len(keys) - 1\n for i, (k, src) in enumerate(keys):\n convert = i == keys_len\n new_data = new_data.replace(\n to_replace=src,\n value=value,\n filter=[k],\n inplace=inplace,\n regex=regex,\n convert=convert,\n )\n else:\n raise TypeError(\"value argument must be scalar, dict, or Series\")\n\n elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']\n if is_list_like(value):\n if len(to_replace) != len(value):\n raise ValueError(\n f\"Replacement lists must match in length. \"\n f\"Expecting {len(to_replace)} got {len(value)} \"\n )\n\n new_data = self._data.replace_list(\n src_list=to_replace,\n dest_list=value,\n inplace=inplace,\n regex=regex,\n )\n\n else: # [NA, ''] -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n elif to_replace is None:\n if not (\n is_re_compilable(regex)\n or is_list_like(regex)\n or is_dict_like(regex)\n ):\n raise TypeError(\n f\"'regex' must be a string or a compiled regular expression \"\n f\"or a list or dict of strings or regular expressions, \"\n f\"you passed a {repr(type(regex).__name__)}\"\n )\n return self.replace(\n regex, value, inplace=inplace, limit=limit, regex=True\n )\n else:\n\n # dest iterable dict-like\n if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}\n new_data = self._data\n\n for k, v in value.items():\n if k in self:\n new_data = new_data.replace(\n to_replace=to_replace,\n value=v,\n filter=[k],\n inplace=inplace,\n regex=regex,\n )\n\n elif not is_list_like(value): # NA -> 0\n new_data = self._data.replace(\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\n )\n else:\n raise TypeError(\n f'Invalid \"to_replace\" type: {repr(type(to_replace).__name__)}'\n )\n\n if inplace:\n self._update_inplace(new_data)\n else:\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"interpolate\"\n ] = \"\"\"\n Please note that only ``method='linear'`` is supported for\n DataFrame/Series with a MultiIndex.\n\n Parameters\n ----------\n method : str, default 'linear'\n Interpolation technique to use. One of:\n\n * 'linear': Ignore the index and treat the values as equally\n spaced. This is the only method supported on MultiIndexes.\n * 'time': Works on daily and higher resolution data to interpolate\n given length of interval.\n * 'index', 'values': use the actual numerical values of the index.\n * 'pad': Fill in NaNs using existing values.\n * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',\n 'barycentric', 'polynomial': Passed to\n `scipy.interpolate.interp1d`. 
These methods use the numerical\n values of the index. Both 'polynomial' and 'spline' require that\n you also specify an `order` (int), e.g.\n ``df.interpolate(method='polynomial', order=5)``.\n * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':\n Wrappers around the SciPy interpolation methods of similar\n names. See `Notes`.\n * 'from_derivatives': Refers to\n `scipy.interpolate.BPoly.from_derivatives` which\n replaces 'piecewise_polynomial' interpolation method in\n scipy 0.18.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Axis to interpolate along.\n limit : int, optional\n Maximum number of consecutive NaNs to fill. Must be greater than\n 0.\n inplace : bool, default False\n Update the data in place if possible.\n limit_direction : {'forward', 'backward', 'both'}, default 'forward'\n If limit is specified, consecutive NaNs will be filled in this\n direction.\n limit_area : {`None`, 'inside', 'outside'}, default None\n If limit is specified, consecutive NaNs will be filled with this\n restriction.\n\n * ``None``: No fill restriction.\n * 'inside': Only fill NaNs surrounded by valid values\n (interpolate).\n * 'outside': Only fill NaNs outside valid values (extrapolate).\n\n .. versionadded:: 0.23.0\n\n downcast : optional, 'infer' or None, defaults to None\n Downcast dtypes if possible.\n **kwargs\n Keyword arguments to pass on to the interpolating function.\n\n Returns\n -------\n Series or DataFrame\n Returns the same object type as the caller, interpolated at\n some or all ``NaN`` values.\n\n See Also\n --------\n fillna : Fill missing values using different methods.\n scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials\n (Akima interpolator).\n scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the\n Bernstein basis.\n scipy.interpolate.interp1d : Interpolate a 1-D function.\n scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh\n interpolator).\n scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic\n interpolation.\n scipy.interpolate.CubicSpline : Cubic spline data interpolator.\n\n Notes\n -----\n The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'\n methods are wrappers around the respective SciPy implementations of\n similar names. These use the actual numerical values of the index.\n For more information on their behavior, see the\n `SciPy documentation\n <http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__\n and `SciPy tutorial\n <http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.\n\n Examples\n --------\n Filling in ``NaN`` in a :class:`~pandas.Series` via linear\n interpolation.\n\n >>> s = pd.Series([0, 1, np.nan, 3])\n >>> s\n 0 0.0\n 1 1.0\n 2 NaN\n 3 3.0\n dtype: float64\n >>> s.interpolate()\n 0 0.0\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n Filling in ``NaN`` in a Series by padding, but filling at most two\n consecutive ``NaN`` at a time.\n\n >>> s = pd.Series([np.nan, \"single_one\", np.nan,\n ... \"fill_two_more\", np.nan, np.nan, np.nan,\n ... 
4.71, np.nan])\n >>> s\n 0 NaN\n 1 single_one\n 2 NaN\n 3 fill_two_more\n 4 NaN\n 5 NaN\n 6 NaN\n 7 4.71\n 8 NaN\n dtype: object\n >>> s.interpolate(method='pad', limit=2)\n 0 NaN\n 1 single_one\n 2 single_one\n 3 fill_two_more\n 4 fill_two_more\n 5 fill_two_more\n 6 NaN\n 7 4.71\n 8 4.71\n dtype: object\n\n Filling in ``NaN`` in a Series via polynomial interpolation or splines:\n Both 'polynomial' and 'spline' methods require that you also specify\n an ``order`` (int).\n\n >>> s = pd.Series([0, 2, np.nan, 8])\n >>> s.interpolate(method='polynomial', order=2)\n 0 0.000000\n 1 2.000000\n 2 4.666667\n 3 8.000000\n dtype: float64\n\n Fill the DataFrame forward (that is, going down) along each column\n using linear interpolation.\n\n Note how the last entry in column 'a' is interpolated differently,\n because there is no entry after it to use for interpolation.\n Note how the first entry in column 'b' remains ``NaN``, because there\n is no entry before it to use for interpolation.\n\n >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),\n ... (np.nan, 2.0, np.nan, np.nan),\n ... (2.0, 3.0, np.nan, 9.0),\n ... (np.nan, 4.0, -4.0, 16.0)],\n ... columns=list('abcd'))\n >>> df\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 NaN 2.0 NaN NaN\n 2 2.0 3.0 NaN 9.0\n 3 NaN 4.0 -4.0 16.0\n >>> df.interpolate(method='linear', limit_direction='forward', axis=0)\n a b c d\n 0 0.0 NaN -1.0 1.0\n 1 1.0 2.0 -2.0 5.0\n 2 2.0 3.0 -3.0 9.0\n 3 2.0 4.0 -4.0 16.0\n\n Using polynomial interpolation.\n\n >>> df['d'].interpolate(method='polynomial', order=2)\n 0 1.0\n 1 4.0\n 2 9.0\n 3 16.0\n Name: d, dtype: float64\n \"\"\"\n\n @Appender(_shared_docs[\"interpolate\"] % _shared_doc_kwargs)\n def interpolate(\n self,\n method=\"linear\",\n axis=0,\n limit=None,\n inplace=False,\n limit_direction=\"forward\",\n limit_area=None,\n downcast=None,\n **kwargs,\n ):\n \"\"\"\n Interpolate values according to different methods.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = self._get_axis_number(axis)\n\n if axis == 0:\n ax = self._info_axis_name\n _maybe_transposed_self = self\n elif axis == 1:\n _maybe_transposed_self = self.T\n ax = 1\n\n ax = _maybe_transposed_self._get_axis_number(ax)\n\n if _maybe_transposed_self.ndim == 2:\n alt_ax = 1 - ax\n else:\n alt_ax = ax\n\n if isinstance(_maybe_transposed_self.index, MultiIndex) and method != \"linear\":\n raise ValueError(\n \"Only `method=linear` interpolation is supported on MultiIndexes.\"\n )\n\n if _maybe_transposed_self._data.get_dtype_counts().get(\"object\") == len(\n _maybe_transposed_self.T\n ):\n raise TypeError(\n \"Cannot interpolate with all object-dtype columns \"\n \"in the DataFrame. Try setting at least one \"\n \"column to a numeric dtype.\"\n )\n\n # create/use the index\n if method == \"linear\":\n # prior default\n index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))\n else:\n index = _maybe_transposed_self._get_axis(alt_ax)\n methods = {\"index\", \"values\", \"nearest\", \"time\"}\n is_numeric_or_datetime = (\n is_numeric_dtype(index)\n or is_datetime64_any_dtype(index)\n or is_timedelta64_dtype(index)\n )\n if method not in methods and not is_numeric_or_datetime:\n raise ValueError(\n \"Index column must be numeric or datetime type when \"\n f\"using {method} method other than linear. \"\n \"Try setting a numeric or datetime index column before \"\n \"interpolating.\"\n )\n\n if isna(index).any():\n raise NotImplementedError(\n \"Interpolation with NaNs in the index \"\n \"has not been implemented. 
Try filling \"\n \"those NaNs before interpolating.\"\n )\n data = _maybe_transposed_self._data\n new_data = data.interpolate(\n method=method,\n axis=ax,\n index=index,\n values=_maybe_transposed_self,\n limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n inplace=inplace,\n downcast=downcast,\n **kwargs,\n )\n\n if inplace:\n if axis == 1:\n new_data = self._constructor(new_data).T._data\n self._update_inplace(new_data)\n else:\n res = self._constructor(new_data).__finalize__(self)\n if axis == 1:\n res = res.T\n return res\n\n # ----------------------------------------------------------------------\n # Timeseries methods Methods\n\n def asof(self, where, subset=None):\n \"\"\"\n Return the last row(s) without any NaNs before `where`.\n\n The last row (for each element in `where`, if list) without any\n NaN is taken.\n In case of a :class:`~pandas.DataFrame`, the last row without NaN\n considering only the subset of columns (if not `None`)\n\n If there is no good value, NaN is returned for a Series or\n a Series of NaN values for a DataFrame\n\n Parameters\n ----------\n where : date or array-like of dates\n Date(s) before which the last row(s) are returned.\n subset : str or array-like of str, default `None`\n For DataFrame, if not `None`, only use these columns to\n check for NaNs.\n\n Returns\n -------\n scalar, Series, or DataFrame\n\n The return can be:\n\n * scalar : when `self` is a Series and `where` is a scalar\n * Series: when `self` is a Series and `where` is an array-like,\n or when `self` is a DataFrame and `where` is a scalar\n * DataFrame : when `self` is a DataFrame and `where` is an\n array-like\n\n Return scalar, Series, or DataFrame.\n\n See Also\n --------\n merge_asof : Perform an asof merge. Similar to left join.\n\n Notes\n -----\n Dates are assumed to be sorted. Raises if this is not the case.\n\n Examples\n --------\n A Series and a scalar `where`.\n\n >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])\n >>> s\n 10 1.0\n 20 2.0\n 30 NaN\n 40 4.0\n dtype: float64\n\n >>> s.asof(20)\n 2.0\n\n For a sequence `where`, a Series is returned. The first value is\n NaN, because the first element of `where` is before the first\n index value.\n\n >>> s.asof([5, 20])\n 5 NaN\n 20 2.0\n dtype: float64\n\n Missing values are not considered. The following is ``2.0``, not\n NaN, even though NaN is at the index location for ``30``.\n\n >>> s.asof(30)\n 2.0\n\n Take all columns into consideration\n\n >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],\n ... 'b': [None, None, None, None, 500]},\n ... index=pd.DatetimeIndex(['2018-02-27 09:01:00',\n ... '2018-02-27 09:02:00',\n ... '2018-02-27 09:03:00',\n ... '2018-02-27 09:04:00',\n ... '2018-02-27 09:05:00']))\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']))\n a b\n 2018-02-27 09:03:30 NaN NaN\n 2018-02-27 09:04:30 NaN NaN\n\n Take a single column into consideration\n\n >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',\n ... '2018-02-27 09:04:30']),\n ... 
subset=['a'])\n a b\n 2018-02-27 09:03:30 30.0 NaN\n 2018-02-27 09:04:30 40.0 NaN\n \"\"\"\n if isinstance(where, str):\n where = Timestamp(where)\n\n if not self.index.is_monotonic:\n raise ValueError(\"asof requires a sorted index\")\n\n is_series = isinstance(self, ABCSeries)\n if is_series:\n if subset is not None:\n raise ValueError(\"subset is not valid for Series\")\n else:\n if subset is None:\n subset = self.columns\n if not is_list_like(subset):\n subset = [subset]\n\n is_list = is_list_like(where)\n if not is_list:\n start = self.index[0]\n if isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq).ordinal\n start = start.ordinal\n\n if where < start:\n if not is_series:\n from pandas import Series\n\n return Series(index=self.columns, name=where, dtype=np.float64)\n return np.nan\n\n # It's always much faster to use a *while* loop here for\n # Series than pre-computing all the NAs. However a\n # *while* loop is extremely expensive for DataFrame\n # so we later pre-compute all the NAs and use the same\n # code path whether *where* is a scalar or list.\n # See PR: https://github.com/pandas-dev/pandas/pull/14476\n if is_series:\n loc = self.index.searchsorted(where, side=\"right\")\n if loc > 0:\n loc -= 1\n\n values = self._values\n while loc > 0 and isna(values[loc]):\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where) if is_list else Index([where])\n\n nulls = self.isna() if is_series else self[subset].isna().any(1)\n if nulls.all():\n if is_series:\n return self._constructor(np.nan, index=where, name=self.name)\n elif is_list:\n from pandas import DataFrame\n\n return DataFrame(np.nan, index=where, columns=self.columns)\n else:\n from pandas import Series\n\n return Series(np.nan, index=self.columns, name=where[0])\n\n locs = self.index.asof_locs(where, ~(nulls.values))\n\n # mask the missing\n missing = locs == -1\n data = self.take(locs, is_copy=False)\n data.index = where\n data.loc[missing] = np.nan\n return data if is_list else data.iloc[-1]\n\n # ----------------------------------------------------------------------\n # Action Methods\n\n _shared_docs[\n \"isna\"\n ] = \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or :attr:`numpy.NaN`, gets mapped to True\n values.\n Everything else gets mapped to False values. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.isnull : Alias of isna.\n %(klass)s.notna : Boolean inverse of isna.\n %(klass)s.dropna : Omit axes labels with missing values.\n isna : Top-level isna.\n\n Examples\n --------\n Show which entries in a DataFrame are NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.isna()\n age born name toy\n 0 False True False True\n 1 False False False False\n 2 True False False False\n\n Show which entries in a Series are NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.isna()\n 0 False\n 1 False\n 2 True\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isna(self):\n return isna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"isna\"] % _shared_doc_kwargs)\n def isnull(self):\n return isna(self).__finalize__(self)\n\n _shared_docs[\n \"notna\"\n ] = \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to False\n values.\n\n Returns\n -------\n %(klass)s\n Mask of bool values for each element in %(klass)s that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n %(klass)s.notnull : Alias of notna.\n %(klass)s.isna : Boolean inverse of notna.\n %(klass)s.dropna : Omit axes labels with missing values.\n notna : Top-level notna.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = pd.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notna()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = pd.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n \"\"\"\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notna(self):\n return notna(self).__finalize__(self)\n\n @Appender(_shared_docs[\"notna\"] % _shared_doc_kwargs)\n def notnull(self):\n return notna(self).__finalize__(self)\n\n def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):\n if (lower is not None and np.any(isna(lower))) or (\n upper is not None and np.any(isna(upper))\n ):\n raise ValueError(\"Cannot use an NA value as a clip threshold\")\n\n result = self\n mask = isna(self.values)\n\n with np.errstate(all=\"ignore\"):\n if upper is not None:\n subset = self.to_numpy() <= upper\n result = result.where(subset, upper, axis=None, inplace=False)\n if lower is not None:\n subset = self.to_numpy() >= lower\n result = result.where(subset, lower, axis=None, inplace=False)\n\n if np.any(mask):\n result[mask] = np.nan\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def _clip_with_one_bound(self, threshold, method, axis, inplace):\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # method is self.le for upper bound and self.ge for lower bound\n if is_scalar(threshold) and is_number(threshold):\n if method.__name__ == \"le\":\n return self._clip_with_scalar(None, threshold, inplace=inplace)\n return self._clip_with_scalar(threshold, None, 
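Illustrative sketch for `isna`/`notna` (not part of the dumped source), mirroring the docstring above: empty strings are not treated as NA, only real missing values are.

import pandas as pd
import numpy as np

df = pd.DataFrame({"age": [5, 6, np.nan], "name": ["Alfred", "Batman", ""]})
print(df.isna())    # True only for the real NaN; the empty string is not NA
print(df.notna())   # boolean inverse of isna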
inplace=inplace)\n\n subset = method(threshold, axis=axis) | isna(self)\n\n # GH #15390\n # In order for where method to work, the threshold must\n # be transformed to NDFrame from other array like structure.\n if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):\n if isinstance(self, ABCSeries):\n threshold = self._constructor(threshold, index=self.index)\n else:\n threshold = _align_method_FRAME(self, threshold, axis)\n return self.where(subset, threshold, axis=axis, inplace=inplace)\n\n def clip(\n self,\n lower=None,\n upper=None,\n axis=None,\n inplace: bool_t = False,\n *args,\n **kwargs,\n ):\n \"\"\"\n Trim values at input threshold(s).\n\n Assigns values outside boundary to boundary values. Thresholds\n can be singular values or array like, and in the latter case\n the clipping is performed element-wise in the specified axis.\n\n Parameters\n ----------\n lower : float or array_like, default None\n Minimum threshold value. All values below this\n threshold will be set to it.\n upper : float or array_like, default None\n Maximum threshold value. All values above this\n threshold will be set to it.\n axis : int or str axis name, optional\n Align object with lower and upper along the given axis.\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n\n .. versionadded:: 0.21.0\n *args, **kwargs\n Additional keywords have no effect but might be accepted\n for compatibility with numpy.\n\n Returns\n -------\n Series or DataFrame\n Same type as calling object with the values outside the\n clip boundaries replaced.\n\n Examples\n --------\n >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}\n >>> df = pd.DataFrame(data)\n >>> df\n col_0 col_1\n 0 9 -2\n 1 -3 -7\n 2 0 6\n 3 -1 8\n 4 5 -5\n\n Clips per column using lower and upper thresholds:\n\n >>> df.clip(-4, 6)\n col_0 col_1\n 0 6 -2\n 1 -3 -4\n 2 0 6\n 3 -1 6\n 4 5 -4\n\n Clips using specific lower and upper thresholds per column element:\n\n >>> t = pd.Series([2, -4, -1, 6, 3])\n >>> t\n 0 2\n 1 -4\n 2 -1\n 3 6\n 4 3\n dtype: int64\n\n >>> df.clip(t, t + 4, axis=0)\n col_0 col_1\n 0 6 2\n 1 -3 -4\n 2 0 3\n 3 6 8\n 4 5 3\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = nv.validate_clip_with_axis(axis, args, kwargs)\n if axis is not None:\n axis = self._get_axis_number(axis)\n\n # GH 17276\n # numpy doesn't like NaN as a clip value\n # so ignore\n # GH 19992\n # numpy doesn't drop a list-like bound containing NaN\n if not is_list_like(lower) and np.any(isna(lower)):\n lower = None\n if not is_list_like(upper) and np.any(isna(upper)):\n upper = None\n\n # GH 2747 (arguments were reversed)\n if lower is not None and upper is not None:\n if is_scalar(lower) and is_scalar(upper):\n lower, upper = min(lower, upper), max(lower, upper)\n\n # fast-path for scalars\n if (lower is None or (is_scalar(lower) and is_number(lower))) and (\n upper is None or (is_scalar(upper) and is_number(upper))\n ):\n return self._clip_with_scalar(lower, upper, inplace=inplace)\n\n result = self\n if lower is not None:\n result = result._clip_with_one_bound(\n lower, method=self.ge, axis=axis, inplace=inplace\n )\n if upper is not None:\n if inplace:\n result = self\n result = result._clip_with_one_bound(\n upper, method=self.le, axis=axis, inplace=inplace\n )\n\n return result\n\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index: bool_t = True,\n sort: bool_t = True,\n group_keys: bool_t = True,\n squeeze: bool_t = False,\n observed: bool_t = False,\n 
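Usage sketch for `clip` (not part of the dumped source), following the docstring above: scalar bounds are applied element-wise, array-like bounds are aligned along the given axis.

import pandas as pd

df = pd.DataFrame({"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]})
print(df.clip(-4, 6))              # scalar lower and upper thresholds
t = pd.Series([2, -4, -1, 6, 3])
print(df.clip(t, t + 4, axis=0))   # per-row thresholds aligned along axis 0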
):\n \"\"\"\n Group DataFrame or Series using a mapper or by a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : mapping, function, label, or list of labels\n Used to determine the groups for the groupby.\n If ``by`` is a function, it's called on each value of the object's\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series' values are first\n aligned; see ``.align()`` method). If an ndarray is passed, the\n values are used as-is determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``. Notice\n that a tuple is interpreted as a (single) key.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Split along rows (0) or columns (1).\n level : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively \"SQL-style\" grouped output.\n sort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group.\n group_keys : bool, default True\n When calling apply, add group keys to index to identify pieces.\n squeeze : bool, default False\n Reduce the dimensionality of the return type if possible,\n otherwise return a consistent type.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n resample : Convenience method for frequency conversion and resampling\n of time series.\n\n Notes\n -----\n See the `user guide\n <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',\n ... 'Parrot', 'Parrot'],\n ... 'Max Speed': [380., 370., 24., 26.]})\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n >>> df.groupby(['Animal']).mean()\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n **Hierarchical Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n ... ['Captive', 'Wild', 'Captive', 'Wild']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},\n ... 
index=index)\n >>> df\n Max Speed\n Animal Type\n Falcon Captive 390.0\n Wild 350.0\n Parrot Captive 30.0\n Wild 20.0\n >>> df.groupby(level=0).mean()\n Max Speed\n Animal\n Falcon 370.0\n Parrot 25.0\n >>> df.groupby(level=1).mean()\n Max Speed\n Type\n Captive 210.0\n Wild 185.0\n \"\"\"\n from pandas.core.groupby.groupby import get_groupby\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return get_groupby(\n self,\n by=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n )\n\n def asfreq(\n self,\n freq,\n method=None,\n how: Optional[str] = None,\n normalize: bool_t = False,\n fill_value=None,\n ):\n \"\"\"\n Convert TimeSeries to specified frequency.\n\n Optionally provide filling method to pad/backfill missing values.\n\n Returns the original data conformed to a new index with the specified\n frequency. ``resample`` is more appropriate if an operation, such as\n summarization, is necessary to represent the data at the new frequency.\n\n Parameters\n ----------\n freq : DateOffset or str\n method : {'backfill'/'bfill', 'pad'/'ffill'}, default None\n Method to use for filling holes in reindexed Series (note this\n does not fill NaNs that already were present):\n\n * 'pad' / 'ffill': propagate last valid observation forward to next\n valid\n * 'backfill' / 'bfill': use NEXT valid observation to fill.\n how : {'start', 'end'}, default end\n For PeriodIndex only (see PeriodIndex.asfreq).\n normalize : bool, default False\n Whether to reset output index to midnight.\n fill_value : scalar, optional\n Value to use for missing values, applied during upsampling (note\n this does not fill NaNs that already were present).\n\n Returns\n -------\n converted : same type as caller\n\n See Also\n --------\n reindex\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n Start by creating a series with 4 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=4, freq='T')\n >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)\n >>> df = pd.DataFrame({'s':series})\n >>> df\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:03:00 3.0\n\n Upsample the series into 30 second bins.\n\n >>> df.asfreq(freq='30S')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 NaN\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``fill value``.\n\n >>> df.asfreq(freq='30S', fill_value=9.0)\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 9.0\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 9.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 9.0\n 2000-01-01 00:03:00 3.0\n\n Upsample again, providing a ``method``.\n\n >>> df.asfreq(freq='30S', method='bfill')\n s\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 NaN\n 2000-01-01 00:01:30 2.0\n 2000-01-01 00:02:00 2.0\n 2000-01-01 00:02:30 3.0\n 2000-01-01 00:03:00 3.0\n \"\"\"\n from pandas.core.resample import asfreq\n\n return asfreq(\n self,\n freq,\n method=method,\n how=how,\n normalize=normalize,\n fill_value=fill_value,\n )\n\n def at_time(self, time, asof: bool_t = False, axis=None):\n \"\"\"\n Select values at particular time of day (e.g. 
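Short sketch for `groupby` and `asfreq` (illustrative only, not part of the dumped source), using the public API documented above:

import pandas as pd

df = pd.DataFrame({"Animal": ["Falcon", "Falcon", "Parrot", "Parrot"],
                   "Max Speed": [380.0, 370.0, 24.0, 26.0]})
print(df.groupby("Animal").mean())      # one row per group, mean of the numeric column

idx = pd.date_range("2000-01-01", periods=4, freq="T")
s = pd.Series([0.0, None, 2.0, 3.0], index=idx)
print(s.asfreq("30S", fill_value=9.0))  # fill_value only fills slots introduced by upsampling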
9:30AM).\n\n Parameters\n ----------\n time : datetime.time or str\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n between_time : Select values between particular times of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_at_time : Get just the index locations for\n values at particular time of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-09 12:00:00 2\n 2018-04-10 00:00:00 3\n 2018-04-10 12:00:00 4\n\n >>> ts.at_time('12:00')\n A\n 2018-04-09 12:00:00 2\n 2018-04-10 12:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_at_time(time, asof=asof)\n except AttributeError:\n raise TypeError(\"Index must be DatetimeIndex\")\n\n return self.take(indexer, axis=axis)\n\n def between_time(\n self,\n start_time,\n end_time,\n include_start: bool_t = True,\n include_end: bool_t = True,\n axis=None,\n ):\n \"\"\"\n Select values between particular times of the day (e.g., 9:00-9:30 AM).\n\n By setting ``start_time`` to be later than ``end_time``,\n you can get the times that are *not* between the two times.\n\n Parameters\n ----------\n start_time : datetime.time or str\n end_time : datetime.time or str\n include_start : bool, default True\n include_end : bool, default True\n axis : {0 or 'index', 1 or 'columns'}, default 0\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n at_time : Select values at a particular time of the day.\n first : Select initial periods of time series based on a date offset.\n last : Select final periods of time series based on a date offset.\n DatetimeIndex.indexer_between_time : Get just the index locations for\n values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 00:00:00 1\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n 2018-04-12 01:00:00 4\n\n >>> ts.between_time('0:15', '0:45')\n A\n 2018-04-10 00:20:00 2\n 2018-04-11 00:40:00 3\n\n You get the times that are *not* between two times by setting\n ``start_time`` later than ``end_time``:\n\n >>> ts.between_time('0:45', '0:15')\n A\n 2018-04-09 00:00:00 1\n 2018-04-12 01:00:00 4\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n\n index = self._get_axis(axis)\n try:\n indexer = index.indexer_between_time(\n start_time,\n end_time,\n include_start=include_start,\n include_end=include_end,\n )\n except AttributeError:\n raise TypeError(\"Index must be DatetimeIndex\")\n\n return self.take(indexer, axis=axis)\n\n def resample(\n self,\n rule,\n axis=0,\n closed: Optional[str] = None,\n label: Optional[str] = None,\n convention: str = \"start\",\n kind: Optional[str] = None,\n loffset=None,\n base: int = 0,\n on=None,\n level=None,\n ):\n \"\"\"\n Resample time-series data.\n\n Convenience method for frequency conversion and resampling of time\n series. Object must have a datetime-like index (`DatetimeIndex`,\n `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values\n to the `on` or `level` keyword.\n\n Parameters\n ----------\n rule : DateOffset, Timedelta or str\n The offset string or object representing target conversion.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Which axis to use for up- or down-sampling. For `Series` this\n will default to 0, i.e. along the rows. Must be\n `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.\n closed : {'right', 'left'}, default None\n Which side of bin interval is closed. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n label : {'right', 'left'}, default None\n Which bin edge label to label bucket with. The default is 'left'\n for all frequency offsets except for 'M', 'A', 'Q', 'BM',\n 'BA', 'BQ', and 'W' which all have a default of 'right'.\n convention : {'start', 'end', 's', 'e'}, default 'start'\n For `PeriodIndex` only, controls whether to use the start or\n end of `rule`.\n kind : {'timestamp', 'period'}, optional, default None\n Pass 'timestamp' to convert the resulting index to a\n `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.\n By default the input representation is retained.\n loffset : timedelta, default None\n Adjust the resampled time labels.\n base : int, default 0\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
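Usage sketch for `at_time` and `between_time` (not part of the dumped source; requires a DatetimeIndex, as documented above):

import pandas as pd

i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
print(ts.at_time("12:00"))                # rows whose timestamp is exactly 12:00
print(ts.between_time("00:00", "12:00"))  # rows inside the daily window, endpoints included by default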
Defaults to 0.\n on : str, optional\n For a DataFrame, column to use instead of index for resampling.\n Column must be datetime-like.\n\n level : str or int, optional\n For a MultiIndex, level (name or number) to use for\n resampling. `level` must be datetime-like.\n\n Returns\n -------\n Resampler object\n\n See Also\n --------\n groupby : Group by mapping, function, label, or list of labels.\n Series.resample : Resample a Series.\n DataFrame.resample: Resample a DataFrame.\n\n Notes\n -----\n See the `user guide\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_\n for more.\n\n To learn more about the offset strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.\n\n Examples\n --------\n\n Start by creating a series with 9 one minute timestamps.\n\n >>> index = pd.date_range('1/1/2000', periods=9, freq='T')\n >>> series = pd.Series(range(9), index=index)\n >>> series\n 2000-01-01 00:00:00 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:02:00 2\n 2000-01-01 00:03:00 3\n 2000-01-01 00:04:00 4\n 2000-01-01 00:05:00 5\n 2000-01-01 00:06:00 6\n 2000-01-01 00:07:00 7\n 2000-01-01 00:08:00 8\n Freq: T, dtype: int64\n\n Downsample the series into 3 minute bins and sum the values\n of the timestamps falling into a bin.\n\n >>> series.resample('3T').sum()\n 2000-01-01 00:00:00 3\n 2000-01-01 00:03:00 12\n 2000-01-01 00:06:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but label each\n bin using the right edge instead of the left. Please note that the\n value in the bucket used as the label is not included in the bucket,\n which it labels. For example, in the original series the\n bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed\n value in the resampled bucket with the label ``2000-01-01 00:03:00``\n does not include 3 (if it did, the summed value would be 6, not 3).\n To include this value close the right side of the bin interval as\n illustrated in the example below this one.\n\n >>> series.resample('3T', label='right').sum()\n 2000-01-01 00:03:00 3\n 2000-01-01 00:06:00 12\n 2000-01-01 00:09:00 21\n Freq: 3T, dtype: int64\n\n Downsample the series into 3 minute bins as above, but close the right\n side of the bin interval.\n\n >>> series.resample('3T', label='right', closed='right').sum()\n 2000-01-01 00:00:00 0\n 2000-01-01 00:03:00 6\n 2000-01-01 00:06:00 15\n 2000-01-01 00:09:00 15\n Freq: 3T, dtype: int64\n\n Upsample the series into 30 second bins.\n\n >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows\n 2000-01-01 00:00:00 0.0\n 2000-01-01 00:00:30 NaN\n 2000-01-01 00:01:00 1.0\n 2000-01-01 00:01:30 NaN\n 2000-01-01 00:02:00 2.0\n Freq: 30S, dtype: float64\n\n Upsample the series into 30 second bins and fill the ``NaN``\n values using the ``pad`` method.\n\n >>> series.resample('30S').pad()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 0\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 1\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Upsample the series into 30 second bins and fill the\n ``NaN`` values using the ``bfill`` method.\n\n >>> series.resample('30S').bfill()[0:5]\n 2000-01-01 00:00:00 0\n 2000-01-01 00:00:30 1\n 2000-01-01 00:01:00 1\n 2000-01-01 00:01:30 2\n 2000-01-01 00:02:00 2\n Freq: 30S, dtype: int64\n\n Pass a custom function via ``apply``\n\n >>> def custom_resampler(array_like):\n ... 
return np.sum(array_like) + 5\n ...\n >>> series.resample('3T').apply(custom_resampler)\n 2000-01-01 00:00:00 8\n 2000-01-01 00:03:00 17\n 2000-01-01 00:06:00 26\n Freq: 3T, dtype: int64\n\n For a Series with a PeriodIndex, the keyword `convention` can be\n used to control whether to use the start or end of `rule`.\n\n Resample a year by quarter using 'start' `convention`. Values are\n assigned to the first quarter of the period.\n\n >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',\n ... freq='A',\n ... periods=2))\n >>> s\n 2012 1\n 2013 2\n Freq: A-DEC, dtype: int64\n >>> s.resample('Q', convention='start').asfreq()\n 2012Q1 1.0\n 2012Q2 NaN\n 2012Q3 NaN\n 2012Q4 NaN\n 2013Q1 2.0\n 2013Q2 NaN\n 2013Q3 NaN\n 2013Q4 NaN\n Freq: Q-DEC, dtype: float64\n\n Resample quarters by month using 'end' `convention`. Values are\n assigned to the last month of the period.\n\n >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',\n ... freq='Q',\n ... periods=4))\n >>> q\n 2018Q1 1\n 2018Q2 2\n 2018Q3 3\n 2018Q4 4\n Freq: Q-DEC, dtype: int64\n >>> q.resample('M', convention='end').asfreq()\n 2018-03 1.0\n 2018-04 NaN\n 2018-05 NaN\n 2018-06 2.0\n 2018-07 NaN\n 2018-08 NaN\n 2018-09 3.0\n 2018-10 NaN\n 2018-11 NaN\n 2018-12 4.0\n Freq: M, dtype: float64\n\n For DataFrame objects, the keyword `on` can be used to specify the\n column instead of the index for resampling.\n\n >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df = pd.DataFrame(d)\n >>> df['week_starting'] = pd.date_range('01/01/2018',\n ... periods=8,\n ... freq='W')\n >>> df\n price volume week_starting\n 0 10 50 2018-01-07\n 1 11 60 2018-01-14\n 2 9 40 2018-01-21\n 3 13 100 2018-01-28\n 4 14 50 2018-02-04\n 5 18 100 2018-02-11\n 6 17 40 2018-02-18\n 7 19 50 2018-02-25\n >>> df.resample('M', on='week_starting').mean()\n price volume\n week_starting\n 2018-01-31 10.75 62.5\n 2018-02-28 17.00 60.0\n\n For a DataFrame with MultiIndex, the keyword `level` can be used to\n specify on which level the resampling needs to take place.\n\n >>> days = pd.date_range('1/1/2000', periods=4, freq='D')\n >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],\n ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})\n >>> df2 = pd.DataFrame(d2,\n ... index=pd.MultiIndex.from_product([days,\n ... ['morning',\n ... 'afternoon']]\n ... 
))\n >>> df2\n price volume\n 2000-01-01 morning 10 50\n afternoon 11 60\n 2000-01-02 morning 9 40\n afternoon 13 100\n 2000-01-03 morning 14 50\n afternoon 18 100\n 2000-01-04 morning 17 40\n afternoon 19 50\n >>> df2.resample('D', level=0).sum()\n price volume\n 2000-01-01 21 110\n 2000-01-02 22 140\n 2000-01-03 32 150\n 2000-01-04 36 90\n \"\"\"\n\n from pandas.core.resample import resample\n\n axis = self._get_axis_number(axis)\n return resample(\n self,\n freq=rule,\n label=label,\n closed=closed,\n axis=axis,\n kind=kind,\n loffset=loffset,\n convention=convention,\n base=base,\n key=on,\n level=level,\n )\n\n def first(self, offset):\n \"\"\"\n Method to subset initial periods of time series data based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n last : Select final periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the first 3 days:\n\n >>> ts.first('3D')\n A\n 2018-04-09 1\n 2018-04-11 2\n\n Notice the data for 3 first calender days were returned, not the first\n 3 days observed in the dataset, and therefore data for 2018-04-13 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'first' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n end_date = end = self.index[0] + offset\n\n # Tick-like, e.g. 
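Minimal `resample` sketch (not part of the dumped source), condensing the docstring examples above: downsampling aggregates bins, upsampling introduces NaN slots that can be filled.

import pandas as pd

idx = pd.date_range("2000-01-01", periods=9, freq="T")
s = pd.Series(range(9), index=idx)
print(s.resample("3T").sum())                                  # downsample into 3-minute bins
print(s.resample("3T", label="right", closed="right").sum())   # label and close the right bin edge
print(s.resample("30S").pad()[:5])                             # upsample and forward-fill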
3 weeks\n if not offset.is_anchored() and hasattr(offset, \"_inc\"):\n if end_date in self.index:\n end = self.index.searchsorted(end_date, side=\"left\")\n return self.iloc[:end]\n\n return self.loc[:end]\n\n def last(self, offset):\n \"\"\"\n Method to subset final periods of time series data based on a date offset.\n\n Parameters\n ----------\n offset : str, DateOffset, dateutil.relativedelta\n\n Returns\n -------\n subset : same type as caller\n\n Raises\n ------\n TypeError\n If the index is not a :class:`DatetimeIndex`\n\n See Also\n --------\n first : Select initial periods of time series based on a date offset.\n at_time : Select values at a particular time of the day.\n between_time : Select values between particular times of the day.\n\n Examples\n --------\n >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')\n >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)\n >>> ts\n A\n 2018-04-09 1\n 2018-04-11 2\n 2018-04-13 3\n 2018-04-15 4\n\n Get the rows for the last 3 days:\n\n >>> ts.last('3D')\n A\n 2018-04-13 3\n 2018-04-15 4\n\n Notice the data for 3 last calender days were returned, not the last\n 3 observed days in the dataset, and therefore data for 2018-04-11 was\n not returned.\n \"\"\"\n if not isinstance(self.index, DatetimeIndex):\n raise TypeError(\"'last' only supports a DatetimeIndex index\")\n\n if len(self.index) == 0:\n return self\n\n offset = to_offset(offset)\n\n start_date = self.index[-1] - offset\n start = self.index.searchsorted(start_date, side=\"right\")\n return self.iloc[start:]\n\n def rank(\n self: FrameOrSeries,\n axis=0,\n method: str = \"average\",\n numeric_only: Optional[bool_t] = None,\n na_option: str = \"keep\",\n ascending: bool_t = True,\n pct: bool_t = False,\n ) -> FrameOrSeries:\n \"\"\"\n Compute numerical data ranks (1 through n) along axis.\n\n By default, equal values are assigned a rank that is the average of the\n ranks of those values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Index to direct ranking.\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups.\n\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n How to rank NaN values:\n\n * keep: assign NaN rank to NaN values\n * top: assign smallest rank to NaN values if ascending\n * bottom: assign highest rank to NaN values if ascending.\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n\n Returns\n -------\n same type as caller\n Return a Series or DataFrame with data ranks as values.\n\n See Also\n --------\n core.groupby.GroupBy.rank : Rank of values within each group.\n\n Examples\n --------\n\n >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',\n ... 'spider', 'snake'],\n ... 
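Sketch for `first`/`last` (not part of the dumped source): both subset by a calendar offset from the edge of the index, not by a number of observed rows.

import pandas as pd

i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
print(ts.first("3D"))   # rows within 3 calendar days of the first index value
print(ts.last("3D"))    # rows within 3 calendar days of the last index value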
'Number_legs': [4, 2, 4, 8, np.nan]})\n >>> df\n Animal Number_legs\n 0 cat 4.0\n 1 penguin 2.0\n 2 dog 4.0\n 3 spider 8.0\n 4 snake NaN\n\n The following example shows how the method behaves with the above\n parameters:\n\n * default_rank: this is the default behaviour obtained without using\n any parameter.\n * max_rank: setting ``method = 'max'`` the records that have the\n same values are ranked using the highest rank (e.g.: since 'cat'\n and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)\n * NA_bottom: choosing ``na_option = 'bottom'``, if there are records\n with NaN values they are placed at the bottom of the ranking.\n * pct_rank: when setting ``pct = True``, the ranking is expressed as\n percentile rank.\n\n >>> df['default_rank'] = df['Number_legs'].rank()\n >>> df['max_rank'] = df['Number_legs'].rank(method='max')\n >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')\n >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)\n >>> df\n Animal Number_legs default_rank max_rank NA_bottom pct_rank\n 0 cat 4.0 2.5 3.0 2.5 0.625\n 1 penguin 2.0 1.0 1.0 1.0 0.250\n 2 dog 4.0 2.5 3.0 2.5 0.625\n 3 spider 8.0 4.0 4.0 4.0 1.000\n 4 snake NaN NaN NaN 5.0 NaN\n \"\"\"\n axis = self._get_axis_number(axis)\n\n if na_option not in {\"keep\", \"top\", \"bottom\"}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n\n def ranker(data):\n ranks = algos.rank(\n data.values,\n axis=axis,\n method=method,\n ascending=ascending,\n na_option=na_option,\n pct=pct,\n )\n ranks = self._constructor(ranks, **data._construct_axes_dict())\n return ranks.__finalize__(self)\n\n # if numeric_only is None, and we can't get anything, we try with\n # numeric_only=True\n if numeric_only is None:\n try:\n return ranker(self)\n except TypeError:\n numeric_only = True\n\n if numeric_only:\n data = self._get_numeric_data()\n else:\n data = self\n\n return ranker(data)\n\n _shared_docs[\n \"align\"\n ] = \"\"\"\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {'outer', 'inner', 'left', 'right'}, default 'outer'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n \"compatible\" value.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series:\n\n - pad / ffill: propagate last valid observation forward to next valid.\n - backfill / bfill: use NEXT valid observation to fill gap.\n\n limit : int, default None\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled. 
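Usage sketch for `rank` (not part of the dumped source), exercising the tie and NA options documented above:

import pandas as pd
import numpy as np

s = pd.Series([4, 2, 4, 8, np.nan])
print(s.rank())                                   # ties get the average rank, NaN stays NaN
print(s.rank(method="max", na_option="bottom"))   # ties take the highest rank; NaN is ranked last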
Must be greater than 0 if not None.\n fill_axis : %(axes_single_arg)s, default 0\n Filling axis, method and limit.\n broadcast_axis : %(axes_single_arg)s, default None\n Broadcast values along this axis, if aligning two objects of\n different dimensions.\n\n Returns\n -------\n (left, right) : (%(klass)s, type of other)\n Aligned objects.\n \"\"\"\n\n @Appender(_shared_docs[\"align\"] % _shared_doc_kwargs)\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ):\n method = missing.clean_fill_method(method)\n\n if broadcast_axis == 1 and self.ndim != other.ndim:\n if isinstance(self, ABCSeries):\n # this means other is a DataFrame, and we need to broadcast\n # self\n cons = self._constructor_expanddim\n df = cons(\n {c: self for c in other.columns}, **other._construct_axes_dict()\n )\n return df._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n # this means self is a DataFrame, and we need to broadcast\n # other\n cons = other._constructor_expanddim\n df = cons(\n {c: other for c in self.columns}, **self._construct_axes_dict()\n )\n return self._align_frame(\n df,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if isinstance(other, ABCDataFrame):\n return self._align_frame(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n elif isinstance(other, ABCSeries):\n return self._align_series(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n )\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def _align_frame(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n # defaults\n join_index, join_columns = None, None\n ilidx, iridx = None, None\n clidx, cridx = None, None\n\n is_series = isinstance(self, ABCSeries)\n\n if axis is None or axis == 0:\n if not self.index.equals(other.index):\n join_index, ilidx, iridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if axis is None or axis == 1:\n if not is_series and not self.columns.equals(other.columns):\n join_columns, clidx, cridx = self.columns.join(\n other.columns, how=join, level=level, return_indexers=True\n )\n\n if is_series:\n reindexers = {0: [join_index, ilidx]}\n else:\n reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}\n\n left = self._reindex_with_indexers(\n reindexers, copy=copy, fill_value=fill_value, allow_dups=True\n )\n # other must be always DataFrame\n right = other._reindex_with_indexers(\n {0: [join_index, iridx], 1: [join_columns, cridx]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=True,\n )\n\n if method is not None:\n left = left.fillna(axis=fill_axis, method=method, limit=limit)\n right = right.fillna(axis=fill_axis, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = 
join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _align_series(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy: bool_t = True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n ):\n\n is_series = isinstance(self, ABCSeries)\n\n # series/series compat, other must always be a Series\n if is_series:\n if axis:\n raise ValueError(\"cannot align series to a series other than axis 0\")\n\n # equal\n if self.index.equals(other.index):\n join_index, lidx, ridx = None, None, None\n else:\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n left = self._reindex_indexer(join_index, lidx, copy)\n right = other._reindex_indexer(join_index, ridx, copy)\n\n else:\n # one has > 1 ndim\n fdata = self._data\n if axis == 0:\n join_index = self.index\n lidx, ridx = None, None\n if not self.index.equals(other.index):\n join_index, lidx, ridx = self.index.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=1)\n\n elif axis == 1:\n join_index = self.columns\n lidx, ridx = None, None\n if not self.columns.equals(other.index):\n join_index, lidx, ridx = self.columns.join(\n other.index, how=join, level=level, return_indexers=True\n )\n\n if lidx is not None:\n fdata = fdata.reindex_indexer(join_index, lidx, axis=0)\n else:\n raise ValueError(\"Must specify axis=0 or 1\")\n\n if copy and fdata is self._data:\n fdata = fdata.copy()\n\n left = self._constructor(fdata)\n\n if ridx is None:\n right = other\n else:\n right = other.reindex(join_index, level=level)\n\n # fill\n fill_na = notna(fill_value) or (method is not None)\n if fill_na:\n left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)\n right = right.fillna(fill_value, method=method, limit=limit)\n\n # if DatetimeIndex have different tz, convert to UTC\n if is_series or (not is_series and axis == 0):\n if is_datetime64tz_dtype(left.index):\n if left.index.tz != right.index.tz:\n if join_index is not None:\n left.index = join_index\n right.index = join_index\n\n return left.__finalize__(self), right.__finalize__(other)\n\n def _where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n \"\"\"\n Equivalent to public method `where`, except that `other` is not\n applied as a function even if callable. 
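Illustrative `align` sketch (not part of the dumped source): both objects are reindexed onto a common axis built with the chosen join, and `fill_value` covers the labels each side is missing.

import pandas as pd

a = pd.Series([1, 2, 3], index=["a", "b", "c"])
b = pd.Series([10, 20], index=["b", "d"])
left, right = a.align(b, join="outer", fill_value=0)
print(left)    # union index a, b, c, d; missing slots filled with 0
print(right)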
Used in __setitem__.\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n # align the cond to same shape as myself\n cond = com.apply_if_callable(cond, self)\n if isinstance(cond, NDFrame):\n cond, _ = cond.align(self, join=\"right\", broadcast_axis=1)\n else:\n if not hasattr(cond, \"shape\"):\n cond = np.asanyarray(cond)\n if cond.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n cond = self._constructor(cond, **self._construct_axes_dict())\n\n # make sure we are boolean\n fill_value = bool(inplace)\n cond = cond.fillna(fill_value)\n\n msg = \"Boolean array expected for the condition, not {dtype}\"\n\n if not isinstance(cond, ABCDataFrame):\n # This is a single-dimensional object.\n if not is_bool_dtype(cond):\n raise ValueError(msg.format(dtype=cond.dtype))\n elif not cond.empty:\n for dt in cond.dtypes:\n if not is_bool_dtype(dt):\n raise ValueError(msg.format(dtype=dt))\n\n cond = -cond if inplace else cond\n\n # try to align with other\n try_quick = True\n if hasattr(other, \"align\"):\n\n # align with me\n if other.ndim <= self.ndim:\n\n _, other = self.align(\n other, join=\"left\", axis=axis, level=level, fill_value=np.nan\n )\n\n # if we are NOT aligned, raise as we cannot where index\n if axis is None and not all(\n other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)\n ):\n raise InvalidIndexError\n\n # slice me out of the other\n else:\n raise NotImplementedError(\n \"cannot align with a higher dimensional NDFrame\"\n )\n\n if isinstance(other, np.ndarray):\n\n if other.shape != self.shape:\n\n if self.ndim == 1:\n\n icond = cond.values\n\n # GH 2745 / GH 4192\n # treat like a scalar\n if len(other) == 1:\n other = np.array(other[0])\n\n # GH 3235\n # match True cond to other\n elif len(cond[icond]) == len(other):\n\n # try to not change dtype at first (if try_quick)\n if try_quick:\n new_other = com.values_from_object(self)\n new_other = new_other.copy()\n new_other[icond] = other\n other = new_other\n\n else:\n raise ValueError(\n \"Length of replacements must equal series length\"\n )\n\n else:\n raise ValueError(\n \"other must be the same shape as self when an ndarray\"\n )\n\n # we are the same shape, so create an actual object for alignment\n else:\n other = self._constructor(other, **self._construct_axes_dict())\n\n if axis is None:\n axis = 0\n\n if self.ndim == getattr(other, \"ndim\", 0):\n align = True\n else:\n align = self._get_axis_number(axis) == 1\n\n block_axis = self._get_block_manager_axis(axis)\n\n if inplace:\n # we may have different type blocks come out of putmask, so\n # reconstruct the block manager\n\n self._check_inplace_setting(other)\n new_data = self._data.putmask(\n mask=cond,\n new=other,\n align=align,\n inplace=True,\n axis=block_axis,\n transpose=self._AXIS_REVERSED,\n )\n self._update_inplace(new_data)\n\n else:\n new_data = self._data.where(\n other=other,\n cond=cond,\n align=align,\n errors=errors,\n try_cast=try_cast,\n axis=block_axis,\n )\n\n return self._constructor(new_data).__finalize__(self)\n\n _shared_docs[\n \"where\"\n ] = \"\"\"\n Replace values where the condition is %(cond_rev)s.\n\n Parameters\n ----------\n cond : bool %(klass)s, array-like, or callable\n Where `cond` is %(cond)s, keep the original value. Where\n %(cond_rev)s, replace with corresponding value from `other`.\n If `cond` is callable, it is computed on the %(klass)s and\n should return boolean %(klass)s or array. 
The callable must\n not change input %(klass)s (though pandas doesn't check it).\n other : scalar, %(klass)s, or callable\n Entries where `cond` is %(cond_rev)s are replaced with\n corresponding value from `other`.\n If other is callable, it is computed on the %(klass)s and\n should return scalar or %(klass)s. The callable must not\n change input %(klass)s (though pandas doesn't check it).\n inplace : bool, default False\n Whether to perform the operation in place on the data.\n axis : int, default None\n Alignment axis if needed.\n level : int, default None\n Alignment level if needed.\n errors : str, {'raise', 'ignore'}, default 'raise'\n Note that currently this parameter won't affect\n the results and will always coerce to a suitable dtype.\n\n - 'raise' : allow exceptions to be raised.\n - 'ignore' : suppress exceptions. On error return original object.\n\n try_cast : bool, default False\n Try to cast the result back to the input type (if possible).\n\n Returns\n -------\n Same type as caller\n\n See Also\n --------\n :func:`DataFrame.%(name_other)s` : Return an object of same shape as\n self.\n\n Notes\n -----\n The %(name)s method is an application of the if-then idiom. For each\n element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the\n element is used; otherwise the corresponding element from the DataFrame\n ``other`` is used.\n\n The signature for :func:`DataFrame.where` differs from\n :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to\n ``np.where(m, df1, df2)``.\n\n For further details and examples see the ``%(name)s`` documentation in\n :ref:`indexing <indexing.where_mask>`.\n\n Examples\n --------\n >>> s = pd.Series(range(5))\n >>> s.where(s > 0)\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n 4 4.0\n dtype: float64\n\n >>> s.mask(s > 0)\n 0 0.0\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: float64\n\n >>> s.where(s > 1, 10)\n 0 10\n 1 10\n 2 2\n 3 3\n 4 4\n dtype: int64\n\n >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n >>> df\n A B\n 0 0 1\n 1 2 3\n 2 4 5\n 3 6 7\n 4 8 9\n >>> m = df %% 3 == 0\n >>> df.where(m, -df)\n A B\n 0 0 -1\n 1 -2 3\n 2 -4 -5\n 3 6 -7\n 4 -8 9\n >>> df.where(m, -df) == np.where(m, df, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n >>> df.where(m, -df) == df.mask(~m, -df)\n A B\n 0 True True\n 1 True True\n 2 True True\n 3 True True\n 4 True True\n \"\"\"\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"True\",\n cond_rev=\"False\",\n name=\"where\",\n name_other=\"mask\",\n )\n )\n def where(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n other = com.apply_if_callable(other, self)\n return self._where(\n cond, other, inplace, axis, level, errors=errors, try_cast=try_cast\n )\n\n @Appender(\n _shared_docs[\"where\"]\n % dict(\n _shared_doc_kwargs,\n cond=\"False\",\n cond_rev=\"True\",\n name=\"mask\",\n name_other=\"where\",\n )\n )\n def mask(\n self,\n cond,\n other=np.nan,\n inplace=False,\n axis=None,\n level=None,\n errors=\"raise\",\n try_cast=False,\n ):\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n cond = com.apply_if_callable(cond, self)\n\n # see gh-21891\n if not hasattr(cond, \"__invert__\"):\n cond = np.array(cond)\n\n return self.where(\n ~cond,\n other=other,\n inplace=inplace,\n axis=axis,\n level=level,\n try_cast=try_cast,\n errors=errors,\n )\n\n _shared_docs[\n \"shift\"\n ] = \"\"\"\n Shift index by desired number of periods with an 
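Sketch for `where`/`mask` (not part of the dumped source), following the shared docstring above: `where` keeps values where the condition is True, `mask` replaces them there.

import pandas as pd
import numpy as np

df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
print(df.where(m, -df))   # keep entries where m is True, use -df elsewhere
print(df.mask(m, -df))    # the inverse: replace entries where m is True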
optional time `freq`.\n\n When `freq` is not passed, shift the index without realigning the data.\n If `freq` is passed (in this case, the index must be date or datetime,\n or it will raise a `NotImplementedError`), the index will be\n increased using the periods and the `freq`.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. Can be positive or negative.\n freq : DateOffset, tseries.offsets, timedelta, or str, optional\n Offset to use from the tseries module or time rule (e.g. 'EOM').\n If `freq` is specified then the index values are shifted but the\n data is not realigned. That is, use `freq` if you would like to\n extend the index when shifting and preserve the original data.\n axis : {0 or 'index', 1 or 'columns', None}, default None\n Shift direction.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n the default depends on the dtype of `self`.\n For numeric data, ``np.nan`` is used.\n For datetime, timedelta, or period data, etc. :attr:`NaT` is used.\n For extension dtypes, ``self.dtype.na_value`` is used.\n\n .. versionchanged:: 0.24.0\n\n Returns\n -------\n %(klass)s\n Copy of input object, shifted.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n DatetimeIndex.shift : Shift values of DatetimeIndex.\n PeriodIndex.shift : Shift values of PeriodIndex.\n tshift : Shift the time index, using the index's frequency if\n available.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]})\n\n >>> df.shift(periods=3)\n Col1 Col2 Col3\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 10.0 13.0 17.0\n 4 20.0 23.0 27.0\n\n >>> df.shift(periods=1, axis='columns')\n Col1 Col2 Col3\n 0 NaN 10.0 13.0\n 1 NaN 20.0 23.0\n 2 NaN 15.0 18.0\n 3 NaN 30.0 33.0\n 4 NaN 45.0 48.0\n\n >>> df.shift(periods=3, fill_value=0)\n Col1 Col2 Col3\n 0 0 0 0\n 1 0 0 0\n 2 0 0 0\n 3 10 13 17\n 4 20 23 27\n \"\"\"\n\n @Appender(_shared_docs[\"shift\"] % _shared_doc_kwargs)\n def shift(self, periods=1, freq=None, axis=0, fill_value=None):\n if periods == 0:\n return self.copy()\n\n block_axis = self._get_block_manager_axis(axis)\n if freq is None:\n new_data = self._data.shift(\n periods=periods, axis=block_axis, fill_value=fill_value\n )\n else:\n return self.tshift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def slice_shift(self: FrameOrSeries, periods: int = 1, axis=0) -> FrameOrSeries:\n \"\"\"\n Equivalent to `shift` without copying data.\n\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative.\n\n Returns\n -------\n shifted : same type as caller\n\n Notes\n -----\n While the `slice_shift` is faster than `shift`, you may pay for it\n later during alignment.\n \"\"\"\n if periods == 0:\n return self\n\n if periods > 0:\n vslicer = slice(None, -periods)\n islicer = slice(periods, None)\n else:\n vslicer = slice(-periods, None)\n islicer = slice(None, periods)\n\n new_obj = self._slice(vslicer, axis=axis)\n shifted_axis = self._get_axis(axis)[islicer]\n new_obj.set_axis(shifted_axis, axis=axis, inplace=True)\n\n return new_obj.__finalize__(self)\n\n def tshift(self, periods: int = 1, freq=None, axis=0):\n \"\"\"\n Shift the time index, using the index's frequency if available.\n\n Parameters\n ----------\n periods : int\n Number of periods to 
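Usage sketch for `shift` and `slice_shift` (not part of the dumped source): `shift` introduces missing values (or a `fill_value`), while `slice_shift` simply drops the shifted-out rows and returns a shorter object.

import pandas as pd

df = pd.DataFrame({"Col1": [10, 20, 15, 30, 45]})
print(df.shift(periods=2))                 # NaN appears in the first two rows
print(df.shift(periods=2, fill_value=0))   # fill the new slots with 0 instead
print(df.slice_shift(2))                   # same shift, but dropped periods are not included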
move, can be positive or negative.\n freq : DateOffset, timedelta, or str, default None\n Increment to use from the tseries module\n or time rule expressed as a string (e.g. 'EOM').\n axis : {0 or ‘index’, 1 or ‘columns’, None}, default 0\n Corresponds to the axis that contains the Index.\n\n Returns\n -------\n shifted : Series/DataFrame\n\n Notes\n -----\n If freq is not specified then tries to use the freq or inferred_freq\n attributes of the index. If neither of those attributes exist, a\n ValueError is thrown\n \"\"\"\n\n index = self._get_axis(axis)\n if freq is None:\n freq = getattr(index, \"freq\", None)\n\n if freq is None:\n freq = getattr(index, \"inferred_freq\", None)\n\n if freq is None:\n msg = \"Freq was not given and was not set in the index\"\n raise ValueError(msg)\n\n if periods == 0:\n return self\n\n if isinstance(freq, str):\n freq = to_offset(freq)\n\n block_axis = self._get_block_manager_axis(axis)\n if isinstance(index, PeriodIndex):\n orig_freq = to_offset(index.freq)\n if freq == orig_freq:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods)\n elif orig_freq is not None:\n msg = (\n f\"Given freq {freq.rule_code} does not match\"\n f\" PeriodIndex freq {orig_freq.rule_code}\"\n )\n raise ValueError(msg)\n else:\n new_data = self._data.copy()\n new_data.axes[block_axis] = index.shift(periods, freq)\n\n return self._constructor(new_data).__finalize__(self)\n\n def truncate(\n self: FrameOrSeries, before=None, after=None, axis=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or 'index', 1 or 'columns'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Notes\n -----\n If the index being truncated contains only datetime values,\n `before` and `after` may be specified as strings instead of\n Timestamps.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],\n ... 'B': ['f', 'g', 'h', 'i', 'j'],\n ... 'C': ['k', 'l', 'm', 'n', 'o']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before=\"A\", after=\"B\", axis=\"columns\")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df['A'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n The index values in ``truncate`` can be datetimes or string\n dates.\n\n >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')\n >>> df = pd.DataFrame(index=dates, data={'A': 1})\n >>> df.tail()\n A\n 2016-01-31 23:59:56 1\n 2016-01-31 23:59:57 1\n 2016-01-31 23:59:58 1\n 2016-01-31 23:59:59 1\n 2016-02-01 00:00:00 1\n\n >>> df.truncate(before=pd.Timestamp('2016-01-05'),\n ... 
after=pd.Timestamp('2016-01-10')).tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Because the index is a DatetimeIndex containing only dates, we can\n specify `before` and `after` as strings. They will be coerced to\n Timestamps before truncation.\n\n >>> df.truncate('2016-01-05', '2016-01-10').tail()\n A\n 2016-01-09 23:59:56 1\n 2016-01-09 23:59:57 1\n 2016-01-09 23:59:58 1\n 2016-01-09 23:59:59 1\n 2016-01-10 00:00:00 1\n\n Note that ``truncate`` assumes a 0 value for any unspecified time\n component (midnight). This differs from partial string slicing, which\n returns any partially matching dates.\n\n >>> df.loc['2016-01-05':'2016-01-10', :].tail()\n A\n 2016-01-10 23:59:55 1\n 2016-01-10 23:59:56 1\n 2016-01-10 23:59:57 1\n 2016-01-10 23:59:58 1\n 2016-01-10 23:59:59 1\n \"\"\"\n if axis is None:\n axis = self._stat_axis_number\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n # GH 17935\n # Check that index is sorted\n if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:\n raise ValueError(\"truncate requires a sorted index\")\n\n # if we have a date index, convert to dates, otherwise\n # treat like a slice\n if ax.is_all_dates:\n from pandas.core.tools.datetimes import to_datetime\n\n before = to_datetime(before)\n after = to_datetime(after)\n\n if before is not None and after is not None:\n if before > after:\n raise ValueError(f\"Truncate: {after} must be after {before}\")\n\n slicer = [slice(None, None)] * self._AXIS_LEN\n slicer[axis] = slice(before, after)\n result = self.loc[tuple(slicer)]\n\n if isinstance(ax, MultiIndex):\n setattr(result, self._get_axis_name(axis), ax.truncate(before, after))\n\n if copy:\n result = result.copy()\n\n return result\n\n def tz_convert(\n self: FrameOrSeries, tz, axis=0, level=None, copy: bool_t = True\n ) -> FrameOrSeries:\n \"\"\"\n Convert tz-aware axis to target time zone.\n\n Parameters\n ----------\n tz : str or tzinfo object\n axis : the axis to convert\n level : int, str, default None\n If axis is a MultiIndex, convert a specific level. 
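Minimal `truncate` sketch (not part of the dumped source): keep only the labels between `before` and `after` on a sorted axis; columns can be truncated as well.

import pandas as pd

df = pd.DataFrame({"A": list("abcde"), "B": list("fghij")}, index=[1, 2, 3, 4, 5])
print(df.truncate(before=2, after=4))                        # keep index labels 2 through 4
print(df.truncate(before="A", after="A", axis="columns"))    # truncate along the columns instead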
Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n\n Returns\n -------\n %(klass)s\n Object with time zone converted axis.\n\n Raises\n ------\n TypeError\n If the axis is tz-naive.\n \"\"\"\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_convert(ax, tz):\n if not hasattr(ax, \"tz_convert\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_convert(tz)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_convert(ax.levels[level], tz)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_convert(ax, tz)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n def tz_localize(\n self: FrameOrSeries,\n tz,\n axis=0,\n level=None,\n copy: bool_t = True,\n ambiguous=\"raise\",\n nonexistent: str = \"raise\",\n ) -> FrameOrSeries:\n \"\"\"\n Localize tz-naive index of a Series or DataFrame to target time zone.\n\n This operation localizes the Index. To localize the values in a\n timezone-naive Series, use :meth:`Series.dt.tz_localize`.\n\n Parameters\n ----------\n tz : str or tzinfo\n axis : the axis to localize\n level : int, str, default None\n If axis ia a MultiIndex, localize a specific level. Otherwise\n must be None.\n copy : bool, default True\n Also make a copy of the underlying data.\n ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'\n When clocks moved backward due to DST, ambiguous times may arise.\n For example in Central European Time (UTC+01), when going from\n 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at\n 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the\n `ambiguous` parameter dictates how ambiguous times should be\n handled.\n\n - 'infer' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - 'NaT' will return NaT where there are ambiguous times\n - 'raise' will raise an AmbiguousTimeError if there are ambiguous\n times.\n nonexistent : str, default 'raise'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST. Valid values are:\n\n - 'shift_forward' will shift the nonexistent time forward to the\n closest existing time\n - 'shift_backward' will shift the nonexistent time backward to the\n closest existing time\n - 'NaT' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - 'raise' will raise an NonExistentTimeError if there are\n nonexistent times.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n Series or DataFrame\n Same type as the input.\n\n Raises\n ------\n TypeError\n If the TimeSeries is tz-aware and tz is not None.\n\n Examples\n --------\n\n Localize local times:\n\n >>> s = pd.Series([1],\n ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))\n >>> s.tz_localize('CET')\n 2018-09-15 01:30:00+02:00 1\n dtype: int64\n\n Be careful with DST changes. 
When there is sequential data, pandas\n can infer the DST time:\n\n >>> s = pd.Series(range(7),\n ... index=pd.DatetimeIndex(['2018-10-28 01:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 02:00:00',\n ... '2018-10-28 02:30:00',\n ... '2018-10-28 03:00:00',\n ... '2018-10-28 03:30:00']))\n >>> s.tz_localize('CET', ambiguous='infer')\n 2018-10-28 01:30:00+02:00 0\n 2018-10-28 02:00:00+02:00 1\n 2018-10-28 02:30:00+02:00 2\n 2018-10-28 02:00:00+01:00 3\n 2018-10-28 02:30:00+01:00 4\n 2018-10-28 03:00:00+01:00 5\n 2018-10-28 03:30:00+01:00 6\n dtype: int64\n\n In some cases, inferring the DST is impossible. In such cases, you can\n pass an ndarray to the ambiguous parameter to set the DST explicitly\n\n >>> s = pd.Series(range(3),\n ... index=pd.DatetimeIndex(['2018-10-28 01:20:00',\n ... '2018-10-28 02:36:00',\n ... '2018-10-28 03:46:00']))\n >>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n 2018-10-28 01:20:00+02:00 0\n 2018-10-28 02:36:00+02:00 1\n 2018-10-28 03:46:00+01:00 2\n dtype: int64\n\n If the DST transition causes nonexistent times, you can shift these\n dates forward or backwards with a timedelta object or `'shift_forward'`\n or `'shift_backwards'`.\n >>> s = pd.Series(range(2),\n ... index=pd.DatetimeIndex(['2015-03-29 02:30:00',\n ... '2015-03-29 03:30:00']))\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')\n 2015-03-29 03:00:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')\n 2015-03-29 01:59:59.999999999+01:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))\n 2015-03-29 03:30:00+02:00 0\n 2015-03-29 03:30:00+02:00 1\n dtype: int64\n \"\"\"\n nonexistent_options = (\"raise\", \"NaT\", \"shift_forward\", \"shift_backward\")\n if nonexistent not in nonexistent_options and not isinstance(\n nonexistent, timedelta\n ):\n raise ValueError(\n \"The nonexistent argument must be one of 'raise', \"\n \"'NaT', 'shift_forward', 'shift_backward' or \"\n \"a timedelta object\"\n )\n\n axis = self._get_axis_number(axis)\n ax = self._get_axis(axis)\n\n def _tz_localize(ax, tz, ambiguous, nonexistent):\n if not hasattr(ax, \"tz_localize\"):\n if len(ax) > 0:\n ax_name = self._get_axis_name(axis)\n raise TypeError(\n f\"{ax_name} is not a valid DatetimeIndex or PeriodIndex\"\n )\n else:\n ax = DatetimeIndex([], tz=tz)\n else:\n ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)\n return ax\n\n # if a level is given it must be a MultiIndex level or\n # equivalent to the axis name\n if isinstance(ax, MultiIndex):\n level = ax._get_level_number(level)\n new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)\n ax = ax.set_levels(new_level, level=level)\n else:\n if level not in (None, 0, ax.name):\n raise ValueError(f\"The level {level} is not valid\")\n ax = _tz_localize(ax, tz, ambiguous, nonexistent)\n\n result = self._constructor(self._data, copy=copy)\n result = result.set_axis(ax, axis=axis, inplace=False)\n return result.__finalize__(self)\n\n # ----------------------------------------------------------------------\n # Numeric Methods\n def abs(self):\n \"\"\"\n Return a Series/DataFrame with absolute numeric value of each element.\n\n This function only applies to elements that are all numeric.\n\n Returns\n -------\n abs\n Series/DataFrame containing the absolute value of each element.\n\n See Also\n --------\n numpy.absolute : Calculate the 
absolute value element-wise.\n\n Notes\n -----\n For ``complex`` inputs, ``1.2 + 1j``, the absolute value is\n :math:`\\\\sqrt{ a^2 + b^2 }`.\n\n Examples\n --------\n Absolute numeric values in a Series.\n\n >>> s = pd.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a Series with complex numbers.\n\n >>> s = pd.Series([1.2 + 1j])\n >>> s.abs()\n 0 1.56205\n dtype: float64\n\n Absolute numeric values in a Series with a Timedelta element.\n\n >>> s = pd.Series([pd.Timedelta('1 days')])\n >>> s.abs()\n 0 1 days\n dtype: timedelta64[ns]\n\n Select rows with data closest to certain value using argsort (from\n `StackOverflow <https://stackoverflow.com/a/17758115>`__).\n\n >>> df = pd.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... })\n >>> df\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 -30\n 3 7 40 -50\n >>> df.loc[(df.c - 43).abs().argsort()]\n a b c\n 1 5 20 50\n 0 4 10 100\n 2 6 30 -30\n 3 7 40 -50\n \"\"\"\n return np.abs(self)\n\n def describe(self, percentiles=None, include=None, exclude=None):\n \"\"\"\n Generate descriptive statistics.\n\n Descriptive statistics include those that summarize the central\n tendency, dispersion and shape of a\n dataset's distribution, excluding ``NaN`` values.\n\n Analyzes both numeric and object series, as well\n as ``DataFrame`` column sets of mixed data types. The output\n will vary depending on what is provided. Refer to the notes\n below for more detail.\n\n Parameters\n ----------\n percentiles : list-like of numbers, optional\n The percentiles to include in the output. All should\n fall between 0 and 1. The default is\n ``[.25, .5, .75]``, which returns the 25th, 50th, and\n 75th percentiles.\n include : 'all', list-like of dtypes or None (default), optional\n A white list of data types to include in the result. Ignored\n for ``Series``. Here are the options:\n\n - 'all' : All columns of the input will be included in the output.\n - A list-like of dtypes : Limits the results to the\n provided data types.\n To limit the result to numeric types submit\n ``numpy.number``. To limit it instead to object columns submit\n the ``numpy.object`` data type. Strings\n can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To\n select pandas categorical columns, use ``'category'``\n - None (default) : The result will include all numeric columns.\n exclude : list-like of dtypes or None (default), optional,\n A black list of data types to omit from the result. Ignored\n for ``Series``. Here are the options:\n\n - A list-like of dtypes : Excludes the provided data types\n from the result. To exclude numeric types submit\n ``numpy.number``. To exclude object columns submit the data\n type ``numpy.object``. Strings can also be used in the style of\n ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). 
To\n exclude pandas categorical columns, use ``'category'``\n - None (default) : The result will exclude nothing.\n\n Returns\n -------\n Series or DataFrame\n Summary statistics of the Series or Dataframe provided.\n\n See Also\n --------\n DataFrame.count: Count number of non-NA/null observations.\n DataFrame.max: Maximum of the values in the object.\n DataFrame.min: Minimum of the values in the object.\n DataFrame.mean: Mean of the values.\n DataFrame.std: Standard deviation of the observations.\n DataFrame.select_dtypes: Subset of a DataFrame including/excluding\n columns based on their dtype.\n\n Notes\n -----\n For numeric data, the result's index will include ``count``,\n ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and\n upper percentiles. By default the lower percentile is ``25`` and the\n upper percentile is ``75``. The ``50`` percentile is the\n same as the median.\n\n For object data (e.g. strings or timestamps), the result's index\n will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``\n is the most common value. The ``freq`` is the most common value's\n frequency. Timestamps also include the ``first`` and ``last`` items.\n\n If multiple object values have the highest count, then the\n ``count`` and ``top`` results will be arbitrarily chosen from\n among those with the highest count.\n\n For mixed data types provided via a ``DataFrame``, the default is to\n return only an analysis of numeric columns. If the dataframe consists\n only of object and categorical data without any numeric columns, the\n default is to return an analysis of both the object and categorical\n columns. If ``include='all'`` is provided as an option, the result\n will include a union of attributes of each type.\n\n The `include` and `exclude` parameters can be used to limit\n which columns in a ``DataFrame`` are analyzed for the output.\n The parameters are ignored when analyzing a ``Series``.\n\n Examples\n --------\n Describing a numeric ``Series``.\n\n >>> s = pd.Series([1, 2, 3])\n >>> s.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n dtype: float64\n\n Describing a categorical ``Series``.\n\n >>> s = pd.Series(['a', 'a', 'b', 'c'])\n >>> s.describe()\n count 4\n unique 3\n top a\n freq 2\n dtype: object\n\n Describing a timestamp ``Series``.\n\n >>> s = pd.Series([\n ... np.datetime64(\"2000-01-01\"),\n ... np.datetime64(\"2010-01-01\"),\n ... np.datetime64(\"2010-01-01\")\n ... ])\n >>> s.describe()\n count 3\n unique 2\n top 2010-01-01 00:00:00\n freq 2\n first 2000-01-01 00:00:00\n last 2010-01-01 00:00:00\n dtype: object\n\n Describing a ``DataFrame``. By default only numeric fields\n are returned.\n\n >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),\n ... 'numeric': [1, 2, 3],\n ... 'object': ['a', 'b', 'c']\n ... 
})\n >>> df.describe()\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Describing all columns of a ``DataFrame`` regardless of data type.\n\n >>> df.describe(include='all')\n categorical numeric object\n count 3 3.0 3\n unique 3 NaN 3\n top f NaN c\n freq 1 NaN 1\n mean NaN 2.0 NaN\n std NaN 1.0 NaN\n min NaN 1.0 NaN\n 25% NaN 1.5 NaN\n 50% NaN 2.0 NaN\n 75% NaN 2.5 NaN\n max NaN 3.0 NaN\n\n Describing a column from a ``DataFrame`` by accessing it as\n an attribute.\n\n >>> df.numeric.describe()\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n Name: numeric, dtype: float64\n\n Including only numeric columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.number])\n numeric\n count 3.0\n mean 2.0\n std 1.0\n min 1.0\n 25% 1.5\n 50% 2.0\n 75% 2.5\n max 3.0\n\n Including only string columns in a ``DataFrame`` description.\n\n >>> df.describe(include=[np.object])\n object\n count 3\n unique 3\n top c\n freq 1\n\n Including only categorical columns from a ``DataFrame`` description.\n\n >>> df.describe(include=['category'])\n categorical\n count 3\n unique 3\n top f\n freq 1\n\n Excluding numeric columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.number])\n categorical object\n count 3 3\n unique 3 3\n top f c\n freq 1 1\n\n Excluding object columns from a ``DataFrame`` description.\n\n >>> df.describe(exclude=[np.object])\n categorical numeric\n count 3 3.0\n unique 3 NaN\n top f NaN\n freq 1 NaN\n mean NaN 2.0\n std NaN 1.0\n min NaN 1.0\n 25% NaN 1.5\n 50% NaN 2.0\n 75% NaN 2.5\n max NaN 3.0\n \"\"\"\n if self.ndim == 2 and self.columns.size == 0:\n raise ValueError(\"Cannot describe a DataFrame without columns\")\n\n if percentiles is not None:\n # explicit conversion of `percentiles` to list\n percentiles = list(percentiles)\n\n # get them all to be in [0, 1]\n validate_percentile(percentiles)\n\n # median should always be included\n if 0.5 not in percentiles:\n percentiles.append(0.5)\n percentiles = np.asarray(percentiles)\n else:\n percentiles = np.array([0.25, 0.5, 0.75])\n\n # sort and check for duplicates\n unique_pcts = np.unique(percentiles)\n if len(unique_pcts) < len(percentiles):\n raise ValueError(\"percentiles cannot contain duplicates\")\n percentiles = unique_pcts\n\n formatted_percentiles = format_percentiles(percentiles)\n\n def describe_numeric_1d(series):\n stat_index = (\n [\"count\", \"mean\", \"std\", \"min\"] + formatted_percentiles + [\"max\"]\n )\n d = (\n [series.count(), series.mean(), series.std(), series.min()]\n + series.quantile(percentiles).tolist()\n + [series.max()]\n )\n return pd.Series(d, index=stat_index, name=series.name)\n\n def describe_categorical_1d(data):\n names = [\"count\", \"unique\"]\n objcounts = data.value_counts()\n count_unique = len(objcounts[objcounts != 0])\n result = [data.count(), count_unique]\n dtype = None\n if result[1] > 0:\n top, freq = objcounts.index[0], objcounts.iloc[0]\n\n if is_datetime64_any_dtype(data):\n tz = data.dt.tz\n asint = data.dropna().values.view(\"i8\")\n top = Timestamp(top)\n if top.tzinfo is not None and tz is not None:\n # Don't tz_localize(None) if key is already tz-aware\n top = top.tz_convert(tz)\n else:\n top = top.tz_localize(tz)\n names += [\"top\", \"freq\", \"first\", \"last\"]\n result += [\n top,\n freq,\n Timestamp(asint.min(), tz=tz),\n Timestamp(asint.max(), tz=tz),\n ]\n else:\n names += [\"top\", \"freq\"]\n result += [top, freq]\n\n # If the DataFrame is empty, set 
'top' and 'freq' to None\n # to maintain output shape consistency\n else:\n names += [\"top\", \"freq\"]\n result += [np.nan, np.nan]\n dtype = \"object\"\n\n return pd.Series(result, index=names, name=data.name, dtype=dtype)\n\n def describe_1d(data):\n if is_bool_dtype(data):\n return describe_categorical_1d(data)\n elif is_numeric_dtype(data):\n return describe_numeric_1d(data)\n elif is_timedelta64_dtype(data):\n return describe_numeric_1d(data)\n else:\n return describe_categorical_1d(data)\n\n if self.ndim == 1:\n return describe_1d(self)\n elif (include is None) and (exclude is None):\n # when some numerics are found, keep only numerics\n data = self.select_dtypes(include=[np.number])\n if len(data.columns) == 0:\n data = self\n elif include == \"all\":\n if exclude is not None:\n msg = \"exclude must be None when include is 'all'\"\n raise ValueError(msg)\n data = self\n else:\n data = self.select_dtypes(include=include, exclude=exclude)\n\n ldesc = [describe_1d(s) for _, s in data.items()]\n # set a convenient order for rows\n names = []\n ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n for idxnames in ldesc_indexes:\n for name in idxnames:\n if name not in names:\n names.append(name)\n\n d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)\n d.columns = data.columns.copy()\n return d\n\n _shared_docs[\n \"pct_change\"\n ] = \"\"\"\n Percentage change between the current and a prior element.\n\n Computes the percentage change from the immediately previous row by\n default. This is useful in comparing the percentage of change in a time\n series of elements.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n fill_method : str, default 'pad'\n How to handle NAs before computing percent changes.\n limit : int, default None\n The number of consecutive NAs to fill before stopping.\n freq : DateOffset, timedelta, or str, optional\n Increment to use from time series API (e.g. 'M' or BDay()).\n **kwargs\n Additional keyword arguments are passed into\n `DataFrame.shift` or `Series.shift`.\n\n Returns\n -------\n chg : Series or DataFrame\n The same type as the calling object.\n\n See Also\n --------\n Series.diff : Compute the difference of two elements in a Series.\n DataFrame.diff : Compute the difference of two elements in a DataFrame.\n Series.shift : Shift the index by some number of periods.\n DataFrame.shift : Shift the index by some number of periods.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series([90, 91, 85])\n >>> s\n 0 90\n 1 91\n 2 85\n dtype: int64\n\n >>> s.pct_change()\n 0 NaN\n 1 0.011111\n 2 -0.065934\n dtype: float64\n\n >>> s.pct_change(periods=2)\n 0 NaN\n 1 NaN\n 2 -0.055556\n dtype: float64\n\n See the percentage change in a Series where filling NAs with last\n valid observation forward to next valid.\n\n >>> s = pd.Series([90, 91, None, 85])\n >>> s\n 0 90.0\n 1 91.0\n 2 NaN\n 3 85.0\n dtype: float64\n\n >>> s.pct_change(fill_method='ffill')\n 0 NaN\n 1 0.011111\n 2 0.000000\n 3 -0.065934\n dtype: float64\n\n **DataFrame**\n\n Percentage change in French franc, Deutsche Mark, and Italian lira from\n 1980-01-01 to 1980-03-01.\n\n >>> df = pd.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... 
index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n Percentage of change in GOOG and APPL stock volume. Shows computing\n the percentage change between columns.\n\n >>> df = pd.DataFrame({\n ... '2016': [1769950, 30586265],\n ... '2015': [1500923, 40912316],\n ... '2014': [1371819, 41403351]},\n ... index=['GOOG', 'APPL'])\n >>> df\n 2016 2015 2014\n GOOG 1769950 1500923 1371819\n APPL 30586265 40912316 41403351\n\n >>> df.pct_change(axis='columns')\n 2016 2015 2014\n GOOG NaN -0.151997 -0.086016\n APPL NaN 0.337604 0.012002\n \"\"\"\n\n @Appender(_shared_docs[\"pct_change\"] % _shared_doc_kwargs)\n def pct_change(self, periods=1, fill_method=\"pad\", limit=None, freq=None, **kwargs):\n # TODO: Not sure if above is correct - need someone to confirm.\n axis = self._get_axis_number(kwargs.pop(\"axis\", self._stat_axis_name))\n if fill_method is None:\n data = self\n else:\n data = self.fillna(method=fill_method, limit=limit, axis=axis)\n\n rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1\n rs = rs.loc[~rs.index.duplicated()]\n rs = rs.reindex_like(data)\n if freq is None:\n mask = isna(com.values_from_object(data))\n np.putmask(rs.values, mask, np.nan)\n return rs\n\n def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):\n if axis is None:\n raise ValueError(\"Must specify 'axis' when aggregating by level.\")\n grouped = self.groupby(level=level, axis=axis, sort=False)\n if hasattr(grouped, name) and skipna:\n return getattr(grouped, name)(**kwargs)\n axis = self._get_axis_number(axis)\n method = getattr(type(self), name)\n applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)\n return grouped.aggregate(applyf)\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\"\n Add the operations to the cls; evaluate the doc strings again\n \"\"\"\n\n axis_descr, name, name2 = _doc_parms(cls)\n\n cls.any = _make_logical_function(\n cls,\n \"any\",\n name,\n name2,\n axis_descr,\n _any_desc,\n nanops.nanany,\n _any_see_also,\n _any_examples,\n empty_value=False,\n )\n cls.all = _make_logical_function(\n cls,\n \"all\",\n name,\n name2,\n axis_descr,\n _all_desc,\n nanops.nanall,\n _all_see_also,\n _all_examples,\n empty_value=True,\n )\n\n @Substitution(\n desc=\"Return the mean absolute deviation of the values \"\n \"for the requested axis.\",\n name1=name,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=\"\",\n examples=\"\",\n )\n @Appender(_num_doc)\n def mad(self, axis=None, skipna=None, level=None):\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\"mad\", axis=axis, level=level, skipna=skipna)\n\n data = self._get_numeric_data()\n if axis == 0:\n demeaned = data - data.mean(axis=0)\n else:\n demeaned = data.sub(data.mean(axis=1), axis=0)\n return np.abs(demeaned).mean(axis=axis, skipna=skipna)\n\n cls.mad = mad\n\n cls.sem = _make_stat_function_ddof(\n cls,\n \"sem\",\n name,\n name2,\n axis_descr,\n \"Return unbiased standard error of the mean over requested \"\n \"axis.\\n\\nNormalized by N-1 by default. 
This can be changed \"\n \"using the ddof argument\",\n nanops.nansem,\n )\n cls.var = _make_stat_function_ddof(\n cls,\n \"var\",\n name,\n name2,\n axis_descr,\n \"Return unbiased variance over requested axis.\\n\\nNormalized by \"\n \"N-1 by default. This can be changed using the ddof argument\",\n nanops.nanvar,\n )\n cls.std = _make_stat_function_ddof(\n cls,\n \"std\",\n name,\n name2,\n axis_descr,\n \"Return sample standard deviation over requested axis.\"\n \"\\n\\nNormalized by N-1 by default. This can be changed using the \"\n \"ddof argument\",\n nanops.nanstd,\n )\n\n cls.cummin = _make_cum_function(\n cls,\n \"cummin\",\n name,\n name2,\n axis_descr,\n \"minimum\",\n np.minimum.accumulate,\n \"min\",\n np.inf,\n np.nan,\n _cummin_examples,\n )\n cls.cumsum = _make_cum_function(\n cls,\n \"cumsum\",\n name,\n name2,\n axis_descr,\n \"sum\",\n np.cumsum,\n \"sum\",\n 0.0,\n np.nan,\n _cumsum_examples,\n )\n cls.cumprod = _make_cum_function(\n cls,\n \"cumprod\",\n name,\n name2,\n axis_descr,\n \"product\",\n np.cumprod,\n \"prod\",\n 1.0,\n np.nan,\n _cumprod_examples,\n )\n cls.cummax = _make_cum_function(\n cls,\n \"cummax\",\n name,\n name2,\n axis_descr,\n \"maximum\",\n np.maximum.accumulate,\n \"max\",\n -np.inf,\n np.nan,\n _cummax_examples,\n )\n\n cls.sum = _make_min_count_stat_function(\n cls,\n \"sum\",\n name,\n name2,\n axis_descr,\n \"\"\"Return the sum of the values for the requested axis.\\n\n This is equivalent to the method ``numpy.sum``.\"\"\",\n nanops.nansum,\n _stat_func_see_also,\n _sum_examples,\n )\n cls.mean = _make_stat_function(\n cls,\n \"mean\",\n name,\n name2,\n axis_descr,\n \"Return the mean of the values for the requested axis.\",\n nanops.nanmean,\n )\n cls.skew = _make_stat_function(\n cls,\n \"skew\",\n name,\n name2,\n axis_descr,\n \"Return unbiased skew over requested axis.\\n\\nNormalized by N-1.\",\n nanops.nanskew,\n )\n cls.kurt = _make_stat_function(\n cls,\n \"kurt\",\n name,\n name2,\n axis_descr,\n \"Return unbiased kurtosis over requested axis.\\n\\n\"\n \"Kurtosis obtained using Fisher's definition of\\n\"\n \"kurtosis (kurtosis of normal == 0.0). Normalized \"\n \"by N-1.\",\n nanops.nankurt,\n )\n cls.kurtosis = cls.kurt\n cls.prod = _make_min_count_stat_function(\n cls,\n \"prod\",\n name,\n name2,\n axis_descr,\n \"Return the product of the values for the requested axis.\",\n nanops.nanprod,\n examples=_prod_examples,\n )\n cls.product = cls.prod\n cls.median = _make_stat_function(\n cls,\n \"median\",\n name,\n name2,\n axis_descr,\n \"Return the median of the values for the requested axis.\",\n nanops.nanmedian,\n )\n cls.max = _make_stat_function(\n cls,\n \"max\",\n name,\n name2,\n axis_descr,\n \"\"\"Return the maximum of the values for the requested axis.\\n\n If you want the *index* of the maximum, use ``idxmax``. This is\n the equivalent of the ``numpy.ndarray`` method ``argmax``.\"\"\",\n nanops.nanmax,\n _stat_func_see_also,\n _max_examples,\n )\n cls.min = _make_stat_function(\n cls,\n \"min\",\n name,\n name2,\n axis_descr,\n \"\"\"Return the minimum of the values for the requested axis.\\n\n If you want the *index* of the minimum, use ``idxmin``. 
This is\n the equivalent of the ``numpy.ndarray`` method ``argmin``.\"\"\",\n nanops.nanmin,\n _stat_func_see_also,\n _min_examples,\n )\n\n @classmethod\n def _add_series_or_dataframe_operations(cls):\n \"\"\"\n Add the series or dataframe only operations to the cls; evaluate\n the doc strings again.\n \"\"\"\n\n from pandas.core.window import EWM, Expanding, Rolling, Window\n\n @Appender(Rolling.__doc__)\n def rolling(\n self,\n window,\n min_periods=None,\n center=False,\n win_type=None,\n on=None,\n axis=0,\n closed=None,\n ):\n axis = self._get_axis_number(axis)\n\n if win_type is not None:\n return Window(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n return Rolling(\n self,\n window=window,\n min_periods=min_periods,\n center=center,\n win_type=win_type,\n on=on,\n axis=axis,\n closed=closed,\n )\n\n cls.rolling = rolling\n\n @Appender(Expanding.__doc__)\n def expanding(self, min_periods=1, center=False, axis=0):\n axis = self._get_axis_number(axis)\n return Expanding(self, min_periods=min_periods, center=center, axis=axis)\n\n cls.expanding = expanding\n\n @Appender(EWM.__doc__)\n def ewm(\n self,\n com=None,\n span=None,\n halflife=None,\n alpha=None,\n min_periods=0,\n adjust=True,\n ignore_na=False,\n axis=0,\n ):\n axis = self._get_axis_number(axis)\n return EWM(\n self,\n com=com,\n span=span,\n halflife=halflife,\n alpha=alpha,\n min_periods=min_periods,\n adjust=adjust,\n ignore_na=ignore_na,\n axis=axis,\n )\n\n cls.ewm = ewm\n\n @Appender(_shared_docs[\"transform\"] % dict(axis=\"\", **_shared_doc_kwargs))\n def transform(self, func, *args, **kwargs):\n result = self.agg(func, *args, **kwargs)\n if is_scalar(result) or len(result) != len(self):\n raise ValueError(\"transforms cannot produce aggregated results\")\n\n return result\n\n # ----------------------------------------------------------------------\n # Misc methods\n\n _shared_docs[\n \"valid_index\"\n ] = \"\"\"\n Return index for %(position)s non-NA/null value.\n\n Returns\n -------\n scalar : type of index\n\n Notes\n -----\n If all elements are non-NA/null, returns None.\n Also returns None for empty %(klass)s.\n \"\"\"\n\n def _find_valid_index(self, how: str):\n \"\"\"\n Retrieves the index of the first valid value.\n\n Parameters\n ----------\n how : {'first', 'last'}\n Use this parameter to change between the first or last valid index.\n\n Returns\n -------\n idx_first_valid : type of index\n \"\"\"\n\n idxpos = find_valid_index(self._values, how)\n if idxpos is None:\n return None\n return self.index[idxpos]\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"first\", \"klass\": \"Series/DataFrame\"}\n )\n def first_valid_index(self):\n return self._find_valid_index(\"first\")\n\n @Appender(\n _shared_docs[\"valid_index\"] % {\"position\": \"last\", \"klass\": \"Series/DataFrame\"}\n )\n def last_valid_index(self):\n return self._find_valid_index(\"last\")\n\n\ndef _doc_parms(cls):\n \"\"\"Return a tuple of the doc parms.\"\"\"\n axis_descr = (\n f\"{{{', '.join(f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS))}}}\"\n )\n name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else \"scalar\"\n name2 = cls.__name__\n return axis_descr, name, name2\n\n\n_num_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\n Axis for the function to be applied on.\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nlevel : int or level name, default None\n If 
the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n%(min_count)s\\\n**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\\n%(see_also)s\\\n%(examples)s\n\"\"\"\n\n_num_ddof_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : %(axis_descr)s\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default None\n Include only float, int, boolean columns. If None, will attempt to use\n everything, then use only numeric data. Not implemented for Series.\n\nReturns\n-------\n%(name1)s or %(name2)s (if level specified)\\n\"\"\"\n\n_bool_doc = \"\"\"\n%(desc)s\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns', None}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. If the entire row/column is NA and skipna is\n True, then the result will be %(empty_value)s, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\nlevel : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a %(name1)s.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n If level is specified, then, %(name2)s is returned; otherwise, %(name1)s\n is returned.\n\n%(see_also)s\n%(examples)s\"\"\"\n\n_all_desc = \"\"\"\\\nReturn whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. 
zero or\nempty).\"\"\"\n\n_all_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([]).all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a dataframe from a dictionary.\n\n>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if column-wise values all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis='columns'`` to check if row-wise values all return True.\n\n>>> df.all(axis='columns')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n\"\"\"\n\n_all_see_also = \"\"\"\\\nSee Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n\"\"\"\n\n_cnum_doc = \"\"\"\nReturn cumulative %(desc)s over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n%(desc)s.\n\nParameters\n----------\naxis : {0 or 'index', 1 or 'columns'}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs :\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n%(name1)s or %(name2)s\n\nSee Also\n--------\ncore.window.Expanding.%(accum_func_name)s : Similar functionality\n but ignores ``NaN`` values.\n%(name2)s.%(accum_func_name)s : Return the %(desc)s over\n %(name2)s axis.\n%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.\n%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.\n%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.\n%(name2)s.cumprod : Return cumulative product over %(name2)s axis.\n\n%(examples)s\"\"\"\n\n_cummin_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cumsum_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_cumprod_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n\"\"\"\n\n_cummax_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n\"\"\"\n\n_any_see_also = \"\"\"\\\nSee Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n\"\"\"\n\n_any_desc = \"\"\"\\\nReturn whether any element is True, potentially over an axis.\n\nReturns False unless there at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. 
non-zero or\nnon-empty).\"\"\"\n\n_any_examples = \"\"\"\\\nExamples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([]).any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [0, 2], \"C\": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis='columns')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({\"A\": [True, False], \"B\": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis='columns')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n\"\"\"\n\n_shared_docs[\n \"stat_func_example\"\n] = \"\"\"\n\nExamples\n--------\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}\n\n{verb} using level names, as well as indices.\n\n>>> s.{stat_func}(level='blooded')\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\n\n>>> s.{stat_func}(level=0)\nblooded\nwarm {level_output_0}\ncold {level_output_1}\nName: legs, dtype: int64\"\"\"\n\n_sum_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"sum\", verb=\"Sum\", default_output=14, level_output_0=6, level_output_1=8\n)\n\n_sum_examples += \"\"\"\n\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([]).sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. 
For example, if\nyou'd like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([]).sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan\"\"\"\n\n_max_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"max\", verb=\"Max\", default_output=8, level_output_0=4, level_output_1=8\n)\n\n_min_examples = _shared_docs[\"stat_func_example\"].format(\n stat_func=\"min\", verb=\"Min\", default_output=0, level_output_0=2, level_output_1=0\n)\n\n_stat_func_see_also = \"\"\"\n\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.\"\"\"\n\n_prod_examples = \"\"\"\n\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([]).prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([]).prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan\"\"\"\n\n_min_count_stub = \"\"\"\\\nmin_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n\n .. versionadded:: 0.22.0\n\n Added with the default being 0. 
This means the sum of an all-NA\n or empty Series is 0, and the product of an all-NA or empty\n Series is 1.\n\"\"\"\n\n\ndef _make_min_count_stat_function(\n cls, name, name1, name2, axis_descr, desc, f, see_also: str = \"\", examples: str = \"\"\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=_min_count_stub,\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self,\n axis=None,\n skipna=None,\n level=None,\n numeric_only=None,\n min_count=0,\n **kwargs,\n ):\n if name == \"sum\":\n nv.validate_sum(tuple(), kwargs)\n elif name == \"prod\":\n nv.validate_prod(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, min_count=min_count\n )\n return self._reduce(\n f,\n name,\n axis=axis,\n skipna=skipna,\n numeric_only=numeric_only,\n min_count=min_count,\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function(\n cls, name, name1, name2, axis_descr, desc, f, see_also: str = \"\", examples: str = \"\"\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n min_count=\"\",\n see_also=see_also,\n examples=examples,\n )\n @Appender(_num_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs\n ):\n if name == \"median\":\n nv.validate_median(tuple(), kwargs)\n else:\n nv.validate_stat_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n f, name, axis=axis, skipna=skipna, numeric_only=numeric_only\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):\n @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)\n @Appender(_num_ddof_doc)\n def stat_func(\n self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs\n ):\n nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)\n if skipna is None:\n skipna = True\n if axis is None:\n axis = self._stat_axis_number\n if level is not None:\n return self._agg_by_level(\n name, axis=axis, level=level, skipna=skipna, ddof=ddof\n )\n return self._reduce(\n f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof\n )\n\n return set_function_name(stat_func, name, cls)\n\n\ndef _make_cum_function(\n cls,\n name,\n name1,\n name2,\n axis_descr,\n desc,\n accum_func,\n accum_func_name,\n mask_a,\n mask_b,\n examples,\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n accum_func_name=accum_func_name,\n examples=examples,\n )\n @Appender(_cnum_doc)\n def cum_func(self, axis=None, skipna=True, *args, **kwargs):\n skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)\n if axis is None:\n axis = self._stat_axis_number\n else:\n axis = self._get_axis_number(axis)\n\n y = com.values_from_object(self).copy()\n d = self._construct_axes_dict()\n d[\"copy\"] = False\n\n if issubclass(y.dtype.type, (np.datetime64, np.timedelta64)):\n # numpy 1.18 started sorting NaTs at the end instead of beginning,\n # so we need to work around to maintain backwards-consistency.\n orig_dtype = y.dtype\n if accum_func == 
np.minimum.accumulate:\n # Note: the accum_func comparison fails as an \"is\" comparison\n # Note that \"y\" is always a copy, so we can safely modify it\n mask = isna(self)\n y = y.view(\"i8\")\n y[mask] = np.iinfo(np.int64).max\n\n result = accum_func(y.view(\"i8\"), axis).view(orig_dtype)\n if skipna:\n mask = isna(self)\n np.putmask(result, mask, iNaT)\n elif accum_func == np.minimum.accumulate:\n # Restore NaTs that we masked previously\n nz = (~np.asarray(mask)).nonzero()[0]\n if len(nz):\n # everything up to the first non-na entry stays NaT\n result[: nz[0]] = iNaT\n\n if self.ndim == 1:\n # restore dt64tz dtype\n d[\"dtype\"] = self.dtype\n\n elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):\n mask = isna(self)\n np.putmask(y, mask, mask_a)\n result = accum_func(y, axis)\n np.putmask(result, mask, mask_b)\n else:\n result = accum_func(y, axis)\n\n return self._constructor(result, **d).__finalize__(self)\n\n return set_function_name(cum_func, name, cls)\n\n\ndef _make_logical_function(\n cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value\n):\n @Substitution(\n desc=desc,\n name1=name1,\n name2=name2,\n axis_descr=axis_descr,\n see_also=see_also,\n examples=examples,\n empty_value=empty_value,\n )\n @Appender(_bool_doc)\n def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):\n nv.validate_logical_func(tuple(), kwargs, fname=name)\n if level is not None:\n if bool_only is not None:\n raise NotImplementedError(\n \"Option bool_only is not implemented with option level.\"\n )\n return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)\n return self._reduce(\n f,\n name,\n axis=axis,\n skipna=skipna,\n numeric_only=bool_only,\n filter_type=\"bool\",\n )\n\n return set_function_name(logical_func, name, cls)\n\n\n# install the indexes\nfor _name, _indexer in indexing.get_indexers_list():\n NDFrame._create_indexer(_name, _indexer)\n"
] |
[
[
"pandas.tseries.frequencies.to_offset",
"pandas.util._validators.validate_bool_kwarg",
"pandas.core.dtypes.inference.is_hashable",
"numpy.unique",
"numpy.asanyarray",
"pandas.core.dtypes.common.is_re_compilable",
"pandas.concat",
"pandas.core.dtypes.common.is_list_like",
"pandas.compat.numpy.function.validate_cum_func_with_skipna",
"pandas.io.pickle.to_pickle",
"numpy.array",
"pandas.core.dtypes.common.is_period_arraylike",
"pandas.io.formats.format.DataFrameFormatter",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.window.Window",
"pandas.core.dtypes.missing.isna",
"pandas.io.sql.to_sql",
"pandas.Series",
"numpy.asarray",
"pandas.io.json.to_json",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"numpy.iinfo",
"pandas.core.common.SettingWithCopyError",
"pandas.compat._optional.import_optional_dependency",
"pandas._config.config.is_nonnegative_int",
"numpy.putmask",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.core.common.maybe_box_datetimelike",
"pandas._libs.Timestamp",
"pandas.core.dtypes.common.ensure_str",
"pandas.core.indexes.api.Index",
"numpy.errstate",
"pandas.core.resample.resample",
"pandas.core.common.random_state",
"pandas.core.dtypes.common.is_integer",
"pandas._libs.lib.item_from_zerodim",
"pandas.core.indexes.datetimes.DatetimeIndex",
"pandas.core.window.EWM",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas.core.missing.find_valid_index",
"pandas.core.dtypes.missing.notna",
"pandas.DataFrame",
"pandas.core.common.pipe",
"pandas.core.indexes.api.RangeIndex",
"pandas.core.common.values_from_object",
"pandas.io.pytables.to_hdf",
"pandas.util._decorators.Substitution",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.errors.AbstractMethodError",
"pandas.core.groupby.groupby.get_groupby",
"pandas.core.dtypes.common.is_number",
"pandas.core.dtypes.common.ensure_int64",
"pandas.core.dtypes.common.is_dict_like",
"pandas.core.window.Rolling",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.pandas_dtype",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.isnan",
"pandas.core.algorithms.rank",
"pandas.core.missing.mask_missing",
"pandas.core.missing.get_fill_func",
"pandas.util._decorators.rewrite_axis_style_signature",
"pandas.core.missing.clean_fill_method",
"pandas.core.common.get_rename_function",
"pandas.core.dtypes.common.is_bool",
"pandas.core.dtypes.common.is_scalar",
"pandas.io.formats.csvs.CSVFormatter",
"pandas.io.formats.format.format_percentiles",
"pandas.util._validators.validate_fillna_kwargs",
"pandas.core.resample.asfreq",
"pandas.core.indexes.api.ensure_index",
"pandas.compat.numpy.function.validate_clip_with_axis",
"pandas.core.computation.common._remove_spaces_column_name",
"pandas.util._validators.validate_percentile",
"pandas.core.indexes.period.Period",
"numpy.any",
"pandas.core.indexing.get_indexers_list",
"pandas.io.clipboards.to_clipboard",
"pandas.core.window.Expanding",
"pandas.compat.set_function_name",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_datetime64_any_dtype",
"pandas.core.tools.datetimes.to_datetime",
"pandas.core.common.maybe_make_list",
"pandas._config.config.get_option",
"pandas.core.common.count_not_none",
"numpy.abs",
"pandas.core.ops._align_method_FRAME",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.common.apply_if_callable",
"numpy.prod",
"pandas.core.common.index_labels_to_array"
]
] |
valenescobar/MyST
|
[
"0b9b1f1638cc74c463280597d48627bbf80b547c"
] |
[
"Notas_Python/Notas_AnalisisTecnico/AnalisisTecnico.py"
] |
[
"\n# -- ------------------------------------------------------------- Importar modulos a utilizar -- #\n# -- ------------------------------------------------------------- --------------------------- -- #\n\nimport ta as ta\nimport pandas as pd\nfrom oandapyV20 import API\nimport oandapyV20.endpoints.instruments as instruments\nimport plotly.graph_objs as go\nimport plotly as py\n\n# -- ------------------------------------------------------------------- Parametros para OANDA -- #\n# -- ------------------------------------------------------------------- --------------------- -- #\n\npd.set_option('display.max_rows', 10)\npd.set_option('display.max_columns', 200)\npd.set_option('display.width', 1000)\n\n# -- ------------------------------------------------------------------- Parametros para OANDA -- #\n# -- ------------------------------------------------------------------- --------------------- -- #\n\nA1_OA_Da = 17 # Day Align\nA1_OA_Ta = \"America/Mexico_City\" # Time Align\n\nA1_OA_Ai = \"101-004-2221697-001\" # Id de cuenta\nA1_OA_At = \"practice\" # Tipo de cuenta\n\nA1_OA_In = \"USD_MXN\" # Instrumento\nA1_OA_Gn = \"H1\" # Granularidad de velas\n\nA1_OA_Ak = \"a\"+ \"da4a61b0d5bc0e5939365e01450b614\" + \"-4121f84f01ad78942c46fc3ac777baa\" + \"6\"\n\nF1 = \"2017-01-01T00:00:00Z\"\nF2 = \"2017-02-01T00:00:00Z\"\n\n# -- ---------------------------------------------------------------- Inicializar API de OANDA -- #\n# -- ---------------------------------------------------------------- ------------------------ -- #\n\napi = API(access_token=A1_OA_Ak)\n\n# -- -------------------------------------------------------------- Obtener precios historicos -- #\n# -- -------------------------------------------------------------- -------------------------- -- #\n\nparams = {\"granularity\": A1_OA_Gn, \"price\": \"M\", \"dailyAlignment\": A1_OA_Da,\n \"alignmentTimezone\": A1_OA_Ta, \"from\": F1, \"to\": F2}\n\nA1_Req1 = instruments.InstrumentsCandles(instrument=A1_OA_In, params=params)\nA1_Hist = api.request(A1_Req1)\n\nlista = []\nfor i in range(len(A1_Hist['candles'])-1):\n lista.append({'TimeStamp': A1_Hist['candles'][i]['time'],\n 'Open': A1_Hist['candles'][i]['mid']['o'],\n 'High': A1_Hist['candles'][i]['mid']['h'],\n 'Low': A1_Hist['candles'][i]['mid']['l'],\n 'Close': A1_Hist['candles'][i]['mid']['c']})\n\npd_hist = pd.DataFrame(lista)\npd_hist = pd_hist[['TimeStamp', 'Open', 'High', 'Low', 'Close']]\npd_hist['TimeStamp'] = pd.to_datetime(pd_hist['TimeStamp'])\n\n\n# -- ---------------------------------------------------------------- Agregar analisis tecnico -- #\n# -- ---------------------------------------------------------------- ------------------------ -- #\n\npd_hist['ema_Close'] = ta.ema(series=pd_hist['Close'], periods=20)\n\n# -- ------------------------------------------------------------------- --------------------- -- #\n# -- ------------------------------------------------------------------- --------------------- -- #\n\ntrace0 = go.Scatter(x=pd_hist['TimeStamp'], y=pd_hist['ema_Close'],\n mode='lines', name='lines0')\n\ntrace1 = go.Scatter(x=pd_hist['TimeStamp'], y=pd_hist['Close'],\n mode='lines', name='lines1')\n\ndata = [trace0, trace1]\n\npy.offline.plot(data, filename='file.html')\n"
] |
[
[
"pandas.set_option",
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
margaridaCF/FlyingOctomap_code
|
[
"2ecba75ee58fadd882887f394819433d9b368283"
] |
[
"_generate_plots/time_vs_pathLegth_lazyThetaStar.py"
] |
[
"# -*- coding: utf-8 -*-\nimport plotly.plotly as py\nfrom plotly.graph_objs import *\nimport plotly\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nimport csv\nimport plotly.io as pio\n\nfrom collections import defaultdict\n\n \ndef extract_lazy_theta_star_data(csv_filepath):\n data = pd.read_csv(csv_filepath)\n return data\n\ndef create_polyFitTrace(data, trace_color, polynomial_degree, start, label, legend=True):\n # calculate polynomial\n x_original = data['path_lenght_total_meters']\n y_original = data['computation_time_millis']\n\n z = np.polyfit(x_original, y_original, polynomial_degree)\n f = np.poly1d(z)\n\n # calculate new x's and y's\n x_new = np.linspace(start, max(x_original)+5, 120)\n y_new = f(x_new)\n degree_sym = u'\\u00b0'.encode('utf-8').strip()\n trace = go.Scatter(\n x=x_new,\n y=y_new,\n mode='lines',\n marker=go.Marker(color=trace_color),\n name=str(polynomial_degree)+degree_sym+' poly fit of '+label,\n showlegend=legend\n )\n\n return trace\n\n\ndef plot_polynomialFit_scatter(dataOriginal, dataSparse, dataOrtho, security_margin, polynomial_degree, start):\n traceOriginal = go.Scatter(\n x = dataOriginal['path_lenght_total_meters'],\n y = dataOriginal['computation_time_millis'],\n mode = 'markers',\n marker=go.Marker(color='rgb(255,255,191)'),\n name='Lazy Theta * Offline'\n )\n\n traceSparse = go.Scatter(\n x = dataSparse['path_lenght_total_meters'],\n y = dataSparse['computation_time_millis'],\n mode = 'markers',\n marker=go.Marker(color='rgb(252,141,89)'),\n name='Sparse Neighbors (SN)'\n )\n\n traceOrtho = go.Scatter(\n x = dataOrtho['path_lenght_total_meters'],\n y = dataOrtho['computation_time_millis'],\n mode = 'markers',\n marker=go.Marker(color='rgb(145,191,219)'),\n name='SN + Geometric obstacle avoidance'\n )\n trace_poly_sparse = create_polyFitTrace(dataSparse, 'rgb(252,141,89)', polynomial_degree, start)\n trace_poly_ortho = create_polyFitTrace(dataOrtho, 'rgb(145,191,219)', polynomial_degree, start)\n trace_poly_original = create_polyFitTrace(dataOriginal, 'rgb(255,255,191)', polynomial_degree, start)\n\n data = [traceSparse, traceOrtho, traceOriginal, trace_poly_sparse, trace_poly_ortho, trace_poly_original]\n layout = go.Layout(\n title='Time to find path with ' + security_margin + ' m security margin',\n xaxis=dict(\n title='Distance between points (in meters)',\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n ),\n yaxis=dict(\n title='Computation time (seconds)',\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n )\n )\n fig=dict(data=data, layout=layout)\n plotly.offline.iplot(fig, filename='./scatter_time_vs_pathLength_fit', image='png')\n \n\ndef plot_polynomialFit_dynamicTraces(datasets, datasets_labels, datsets_colors, polynomial_degree, start, title, fname, yMax, yUnits='seconds'):\n\n traces = []\n for x in xrange(0,len(datasets)):\n traces.append(go.Scatter(\n x = datasets[x]['path_lenght_total_meters'],\n y = datasets[x]['computation_time_millis'],\n mode = 'markers',\n marker=go.Marker(color=datsets_colors[x]),\n name=datasets_labels[x]\n )\n )\n traces.append(create_polyFitTrace(datasets[x], datsets_colors[x], polynomial_degree, start, datasets_labels[x]))\n\n range__ = [0, yMax]\n layout = go.Layout(\n title=title,\n xaxis=dict(\n title='Distance between points (in meters)',\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n ),\n yaxis=dict(\n title='Computation time ('+yUnits+')',\n range = [0, 
yMax],\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n )\n )\n fig=dict(data=traces, layout=layout)\n plotly.offline.iplot(fig, filename='./scatter_time_vs_pathLength_fit_'+fname, image='png')\n\ndef plot_scatter_dynamicTraces(datasets, datasets_labels, datsets_colors, title):\n traces = []\n for x in xrange(0,len(datasets)):\n traces.append(go.Scatter(\n x = datasets[x]['path_lenght_total_meters'],\n y = datasets[x]['computation_time_millis'],\n mode = 'markers',\n marker=go.Marker(color=datsets_colors[x]),\n name=datasets_labels[x]\n )\n )\n \n layout = go.Layout(\n title=title,\n xaxis=dict(\n title='Distance between points (in meters)',\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n ),\n yaxis=dict(\n title='Computation time (seconds)',\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n )\n )\n fig=dict(data=traces, layout=layout)\n plotly.offline.iplot(fig, filename='./scatter_time_vs_pathLength', image='png')\n pio.write_image(fig, '/home/mfaria/Downloads/plot_scatter_dynamicTraces.svg', scale=2)\n\ndef filterFailed(data):\n data.drop(data[data['success'] == 0].index, inplace=True)\n data.reset_index(drop=True, inplace=True)\n\ndef filterTooLong(data):\n data.drop(data[data['path_lenght_total_meters'] > 100].index, inplace=True)\n data.reset_index(drop=True, inplace=True)\n\ndef calculateObstacleDensity(row):\n return row.obstacle_hit_count * 100 / row.total_obstacle_checks\n\n\ndef plot_obstacleDensity_dynamicTraces(datasets, datasets_labels, datsets_colors, title):\n traces = []\n for x in xrange(0,len(datasets)):\n traces.append(\n Box(\n y=datasets[x].apply (lambda row: calculateObstacleDensity (row),axis=1), \n name=datasets_labels[x], \n boxpoints='all', \n jitter=0.3, \n pointpos=-1.8))\n\n \n layout = Layout(\n title=title,\n yaxis=dict(\n title='Obstacle presence (%)',\n titlefont=dict(\n family='Courier New, monospace',\n size=18,\n color='#7f7f7f'\n )\n ),\n #showlegend=False\n )\n fig=dict(data=traces, layout=layout)\n plotly.offline.iplot(fig)\n pio.write_image(fig, '/home/mfaria/Downloads/box_obstacleDensity.svg')\n\ndef plot_obstacleDetection(csv_filepathSparse, csv_filepathOrtho, margin, yMax, seconds=False):\n dataSparse = pd.read_csv(csv_filepathSparse)\n dataOrtho = pd.read_csv(csv_filepathOrtho)\n\n if(seconds):\n dataSparse.loc[:,'computation_time_millis'] /= 1000\n dataOrtho.loc[:,'computation_time_millis'] /= 1000\n yUnits = \"seconds\"\n else:\n yUnits = \"millis\"\n\n\n datasets_labels = ['3d Discretization', 'Geometric Definition']\n datsets_colors = ['rgb(252,141,89)', 'rgb(145,191,219)']\n polynomial_degree = 2\n start = 0\n title = \"Processing time for obstacle detection with a \"+str(margin)+\"-meter safety margin\"\n datasets = [dataSparse, dataOrtho]\n plot_polynomialFit_dynamicTraces(datasets, datasets_labels, datsets_colors, polynomial_degree, start, title, \"obstDetect_\"+str(margin)+\"m\", yMax, yUnits=yUnits)\n\n\n# variables= {}\n# execfile( \"time_vs_pathLegth_lazyThetaStar.py\", variables )\n"
] |
[
[
"numpy.poly1d",
"numpy.polyfit",
"pandas.read_csv"
]
] |
YouyangShen/Hierarchical-Localization
|
[
"cdbb38fc12ea046fb05764a62bd699e7f9941806"
] |
[
"hloc/read.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 24 20:03:08 2022\n\n@author: Youyang Shen\n\"\"\"\n\nimport h5py\nimport numpy as np\nLONG = \"feats-LoFTR-r1024_matches-loftr_pairs-netvlad.h5\"\nSHORT = 'feats-LoFTR-r1024.h5'\n\ndef update_two_h5_files(filename_short = SHORT, filename_long = LONG):\n \n print(filename_short,filename_long)\n kpts_sum = {}\n with h5py.File(filename_long, \"r\") as f:\n # List all groups\n keys_list = list(f.keys())\n # print(f'length of keys list is {len(keys_list)}')\n for i,item in enumerate(keys_list): # first level\n # print(f'The {i}-th item in keys list is {item}')\n \n # print(list(f[item]))\n keys_list2 = list(f[item])\n kpts0 = []\n for j,item2 in enumerate(keys_list2): #second level \n # print(list(f[item][item2]))\n \n keys_list3 = list(f[item][item2])\n if len(keys_list3)==3:\n matches0 = list(f[item][item2][keys_list3[0]])\n matches1 = list(f[item][item2][keys_list3[1]])\n for item3 in matches0:\n item3 = item3.tolist()\n if item3 not in kpts0:\n kpts0.append(item3)\n kpts_sum[item] = kpts0\n \n #second round for matches1\n \n keys_list = list(f.keys())\n \n # compliment keypoints sum with data from matches1\n for item in keys_list:\n keys_list2 = list(f[item])\n for item2 in keys_list2:\n keys_list3 = list(f[item][item2])\n matches1 = list(f[item][item2][keys_list3[1]])\n for item3 in matches1:\n item3 = item3.tolist()\n if item2 not in kpts_sum:\n kpts_sum[item2] = []\n if item3 not in kpts_sum[item2]:\n kpts_sum[item2].append(item3) \n \n f.close()\n print('1. keys sum finished!')\n \n #save to file\n for item, value in kpts_sum.items():\n with h5py.File(filename_short, 'a') as fd: \n if fd.get(item):\n grp = fd.get(item)\n else:\n grp = fd.create_group(str(item)) # first level group\n \n # if fd[item]['keypoints']:\n # # if dataset exists, delete\n # del fd[item]['keypoints']\n \n grp.create_dataset('keypoints', data=np.array(kpts_sum[item],dtype=float))\n \n \n \n fd.close()\n print('2. keypoints updated!')\n\n \n #update long by long\n\n with h5py.File(filename_long, \"r\") as f:\n keys_list = list(f.keys())\n matches_sum = {}\n scores_sum = {}\n for item in keys_list:\n matches_sum[item] = {}\n scores_sum[item] = {}\n keys_list2 = list(f[item])\n for item2 in keys_list2:\n matches_sum[item][item2] = []\n scores_sum[item][item2] = []\n \n keys_list3 = list(f[item][item2])\n matches0 = list(f[item][item2][keys_list3[0]]) # list of points in A that has a match in B\n matches1 = list(f[item][item2][keys_list3[1]]) # list of points in B that has a match in A\n matching_scores0 = list(f[item][item2][keys_list3[2]]) # matching score\n keypoints_A = kpts_sum[item] # all points of A\n keypoints_B = kpts_sum[item2] # all points of B\n result = [-1] * len(keypoints_A) \n result_score = [0] * len(keypoints_A) \n for i in range(len(matches0)):\n point_A = matches0[i].tolist()\n point_B = matches1[i].tolist()\n \n idx_A = keypoints_A.index(point_A)\n idx_B = keypoints_B.index(point_B)\n result[idx_A] = idx_B\n result_score[idx_A] = matching_scores0[i]\n \n matches_sum[item][item2] = result\n scores_sum[item][item2] = result_score\n\n f.close()\n print('3. 
matches sum and scores sum!')\n \n for item, value in matches_sum.items():\n with h5py.File(filename_long, 'a') as fd: \n if fd[item]:\n grp = fd[item]\n else: \n grp = fd.create_group(str(item)) # first level group \n for item2, v in value.items():\n \n if fd[item][item2]:\n subgrp = fd[item][item2]\n else: \n subgrp = grp.create_group(str(item2))\n if fd[item][item2]['matches0']:\n del fd[item][item2]['matches0']\n if fd[item][item2]['matching_scores0']:\n del fd[item][item2]['matching_scores0']\n \n subgrp.create_dataset('matches0', data=matches_sum[item][item2])\n subgrp.create_dataset('matching_scores0', data=scores_sum[item][item2]) \n \n fd.close()\n print('4. matches0 and scores0 updated!')\n\n# #%% save to h5\n# file_name = 'feats-LoFTR-r1024_matches-loftr_pairs-db-covis20.h5'\n\n# for item, value in matches_sum.items():\n# with h5py.File(str(file_name), 'a') as fd: \n# grp = fd.create_group(str(item)) # first level group \n# for item2, v in value.items():\n# subgrp = grp.create_group(str(item2))\n# subgrp.create_dataset('matches0', data=matches_sum[item][item2])\n# subgrp.create_dataset('matching_scores0', data=scores_sum[item][item2]) \n \n# fd.close()\n \n# print('finished matching load')\n\n\n#%%\n\n\ndef read_saved_file():\n import h5py\n LONG = \"feats-LoFTR-r1024_matches-loftr_pairs-netvlad.h5\"\n SHORT = 'feats-LoFTR-r1024.h5'\n \n with h5py.File(LONG, \"r\") as f: \n keys_list = list(f.keys())\n for item in keys_list: # level 1\n keys_list2 = list(f[item])\n for item2 in keys_list2:\n keys_list3 = list(f[item][item2])\n matches0 = list(f[item][item2]['matches0'])\n scores0 = list(f[item][item2]['matching_scores0'])\n \n f.close()\n"
] |
[
[
"numpy.array"
]
] |
subhayu99/tardis
|
[
"7792e722f929d661244a819332a031cee7d1b3bf"
] |
[
"tardis/io/atom_data/base.py"
] |
[
"# atomic model\n\n\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import interpolate\nfrom collections import OrderedDict\nfrom astropy import units as u\nfrom tardis import constants as const\nfrom astropy.units import Quantity\nfrom tardis.io.atom_data.util import resolve_atom_data_fname\n\n\nclass AtomDataNotPreparedError(Exception):\n pass\n\n\nclass AtomDataMissingError(Exception):\n pass\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AtomData(object):\n \"\"\"\n Class for storing atomic data\n\n Parameters\n ----------\n atom_data : pandas.DataFrame\n A DataFrame containing the *basic atomic data* with:\n index : atomic_number;\n columns : symbol, name, mass[u].\n ionization_data : pandas.DataFrame\n A DataFrame containing the *ionization data* with:\n index : atomic_number, ion_number;\n columns : ionization_energy[eV].\n It is important to note here is that `ion_number` describes the *final ion state*\n e.g. H I - H II is described with ion=1\n levels : pandas.DataFrame\n A DataFrame containing the *levels data* with:\n index : numerical index;\n columns : atomic_number, ion_number, level_number, energy[eV], g[1], metastable.\n lines : pandas.DataFrame\n A DataFrame containing the *lines data* with:\n index : numerical index;\n columns : line_id, atomic_number, ion_number, level_number_lower, level_number_upper,\n wavelength[angstrom], nu[Hz], f_lu[1], f_ul[1], B_ul[?], B_ul[?], A_ul[1/s].\n macro_atom_data :\n A DataFrame containing the *macro atom data* with:\n index : numerical index;\n columns : atomic_number, ion_number, source_level_number, destination_level_number,\n transition_line_id, transition_type, transition_probability;\n macro_atom_references :\n A DataFrame containing the *macro atom references* with:\n index : numerical index;\n columns : atomic_number, ion_number, source_level_number, count_down, count_up, count_total.\n Refer to the docs: http://tardis.readthedocs.io/en/latest/physics/plasma/macroatom.html\n collision_data : (pandas.DataFrame, np.array)\n A DataFrame containing the *electron collisions data* with:\n index : atomic_number, ion_number, level_number_lower, level_number_upper;\n columns : e_col_id, delta_e, g_ratio, c_ul;\n collision_data_temperatures : np.array\n An array with the collision temperatures.\n zeta_data :\n A DataFrame containing the *zeta data* for the\n nebular ionization calculation\n (i.e., the fraction of recombinations that go directly to the\n ground state) with:\n index : atomic_number, ion_charge;\n columns : temperatures[K]\n synpp_refs : ?\n photoionization_data : pandas.DataFrame\n A DataFrame containing the *photoionization data* with:\n index : numerical index;\n columns : atomic_number, ion_number, level_number, nu[Hz], x_sect[cm^2]\n\n Attributes\n ----------\n prepared : bool\n atom_data : pandas.DataFrame\n ionization_data : pandas.DataFrame\n macro_atom_data_all : pandas.DataFrame\n macro_atom_references_all : pandas.DataFrame\n collision_data : pandas.DataFrame\n collision_data_temperatures : numpy.array\n zeta_data : pandas.DataFrame\n synpp_refs : pandas.DataFrame\n symbol2atomic_number : OrderedDict\n atomic_number2symbol : OrderedDict\n photoionization_data : pandas.DataFrame\n\n Methods\n -------\n from_hdf\n prepare_atom_data\n\n Notes\n -----\n 1. The units of some columns are given in the square brackets. 
They are **NOT** the parts of columns' names!\n \"\"\"\n\n hdf_names = [\n \"atom_data\",\n \"ionization_data\",\n \"levels\",\n \"lines\",\n \"macro_atom_data\",\n \"macro_atom_references\",\n \"zeta_data\",\n \"collision_data\",\n \"collision_data_temperatures\",\n \"synpp_refs\",\n \"photoionization_data\",\n ]\n\n # List of tuples of the related dataframes.\n # Either all or none of the related dataframes must be given\n related_groups = [\n (\"macro_atom_data_all\", \"macro_atom_references_all\"),\n (\"collision_data\", \"collision_data_temperatures\"),\n ]\n\n @classmethod\n def from_hdf(cls, fname=None):\n \"\"\"\n Function to read the atom data from a TARDIS atom HDF Store\n\n Parameters\n ----------\n fname : str, optional\n Path to the HDFStore file or name of known atom data file\n (default: None)\n \"\"\"\n\n dataframes = dict()\n nonavailable = list()\n\n fname = resolve_atom_data_fname(fname)\n\n with pd.HDFStore(fname, \"r\") as store:\n for name in cls.hdf_names:\n try:\n dataframes[name] = store[name]\n except KeyError:\n nonavailable.append(name)\n\n atom_data = cls(**dataframes)\n\n try:\n atom_data.uuid1 = store.root._v_attrs[\"uuid1\"].decode(\"ascii\")\n except KeyError:\n atom_data.uuid1 = None\n\n try:\n atom_data.md5 = store.root._v_attrs[\"md5\"].decode(\"ascii\")\n except KeyError:\n atom_data.md5 = None\n\n try:\n atom_data.version = store.root._v_attrs[\"database_version\"]\n except KeyError:\n atom_data.version = None\n\n # ToDo: strore data sources as attributes in carsus\n\n logger.info(\n \"Read Atom Data with UUID={0} and MD5={1}.\".format(\n atom_data.uuid1, atom_data.md5\n )\n )\n if nonavailable:\n logger.info(\n \"Non provided atomic data: {0}\".format(\n \", \".join(nonavailable)\n )\n )\n\n return atom_data\n\n def __init__(\n self,\n atom_data,\n ionization_data,\n levels=None,\n lines=None,\n macro_atom_data=None,\n macro_atom_references=None,\n zeta_data=None,\n collision_data=None,\n collision_data_temperatures=None,\n synpp_refs=None,\n photoionization_data=None,\n ):\n\n self.prepared = False\n\n # CONVERT VALUES TO CGS UNITS\n\n # Convert atomic masses to CGS\n # We have to use constants.u because astropy uses\n # different values for the unit u and the constant.\n # This is changed in later versions of astropy (\n # the value of constants.u is used in all cases)\n if u.u.cgs == const.u.cgs:\n atom_data.loc[:, \"mass\"] = Quantity(\n atom_data[\"mass\"].values, \"u\"\n ).cgs\n else:\n atom_data.loc[:, \"mass\"] = atom_data[\"mass\"].values * const.u.cgs\n\n # Convert ionization energies to CGS\n ionization_data = ionization_data.squeeze()\n ionization_data[:] = Quantity(ionization_data[:], \"eV\").cgs\n\n # Convert energy to CGS\n levels.loc[:, \"energy\"] = Quantity(levels[\"energy\"].values, \"eV\").cgs\n\n # Create a new columns with wavelengths in the CGS units\n lines.loc[:, \"wavelength_cm\"] = Quantity(\n lines[\"wavelength\"], \"angstrom\"\n ).cgs\n\n # SET ATTRIBUTES\n\n self.atom_data = atom_data\n self.ionization_data = ionization_data\n self.levels = levels\n self.lines = lines\n\n # Rename these (drop \"_all\") when `prepare_atom_data` is removed!\n self.macro_atom_data_all = macro_atom_data\n self.macro_atom_references_all = macro_atom_references\n\n self.zeta_data = zeta_data\n\n self.collision_data = collision_data\n self.collision_data_temperatures = collision_data_temperatures\n\n self.synpp_refs = synpp_refs\n\n self.photoionization_data = photoionization_data\n\n self._check_related()\n\n self.symbol2atomic_number = 
OrderedDict(\n zip(self.atom_data[\"symbol\"].values, self.atom_data.index)\n )\n self.atomic_number2symbol = OrderedDict(\n zip(self.atom_data.index, self.atom_data[\"symbol\"])\n )\n\n def _check_related(self):\n \"\"\"\n Check that either all or none of the related dataframes are given.\n \"\"\"\n for group in self.related_groups:\n check_list = [name for name in group if getattr(self, name) is None]\n\n if len(check_list) != 0 and len(check_list) != len(group):\n raise AtomDataMissingError(\n \"The following dataframes from the related group [{0}] \"\n \"were not given: {1}\".format(\n \", \".join(group), \", \".join(check_list)\n )\n )\n\n def prepare_atom_data(\n self,\n selected_atomic_numbers,\n line_interaction_type=\"scatter\",\n nlte_species=[],\n ):\n \"\"\"\n Prepares the atom data to set the lines, levels and if requested macro\n atom data. This function mainly cuts the `levels` and `lines` by\n discarding any data that is not needed (any data for atoms that are not\n needed\n\n Parameters\n ----------\n selected_atoms : set\n set of selected atom numbers, e.g. set([14, 26])\n line_interaction_type : str\n can be 'scatter', 'downbranch' or 'macroatom'\n \"\"\"\n if not self.prepared:\n self.prepared = True\n else:\n raise AtomDataNotPreparedError(\"AtomData was already prepared\")\n self.selected_atomic_numbers = selected_atomic_numbers\n\n self._check_selected_atomic_numbers()\n\n self.nlte_species = nlte_species\n\n self.levels = self.levels[\n self.levels.index.isin(\n self.selected_atomic_numbers, level=\"atomic_number\"\n )\n ]\n\n self.levels_index = pd.Series(\n np.arange(len(self.levels), dtype=int), index=self.levels.index\n )\n\n # cutting levels_lines\n self.lines = self.lines[\n self.lines.index.isin(\n self.selected_atomic_numbers, level=\"atomic_number\"\n )\n ]\n\n self.lines.sort_values(by=\"wavelength\", inplace=True)\n\n self.lines_index = pd.Series(\n np.arange(len(self.lines), dtype=int),\n index=self.lines.set_index(\"line_id\").index,\n )\n\n tmp_lines_lower2level_idx = self.lines.index.droplevel(\n \"level_number_upper\"\n )\n\n self.lines_lower2level_idx = (\n self.levels_index.loc[tmp_lines_lower2level_idx]\n .astype(np.int64)\n .values\n )\n\n tmp_lines_upper2level_idx = self.lines.index.droplevel(\n \"level_number_lower\"\n )\n\n self.lines_upper2level_idx = (\n self.levels_index.loc[tmp_lines_upper2level_idx]\n .astype(np.int64)\n .values\n )\n\n if (\n self.macro_atom_data_all is not None\n and not line_interaction_type == \"scatter\"\n ):\n\n self.macro_atom_data = self.macro_atom_data_all.loc[\n self.macro_atom_data_all[\"atomic_number\"].isin(\n self.selected_atomic_numbers\n )\n ].copy()\n\n self.macro_atom_references = self.macro_atom_references_all[\n self.macro_atom_references_all.index.isin(\n self.selected_atomic_numbers, level=\"atomic_number\"\n )\n ].copy()\n\n if line_interaction_type == \"downbranch\":\n self.macro_atom_data = self.macro_atom_data.loc[\n self.macro_atom_data[\"transition_type\"] == -1\n ]\n self.macro_atom_references = self.macro_atom_references.loc[\n self.macro_atom_references[\"count_down\"] > 0\n ]\n self.macro_atom_references.loc[\n :, \"count_total\"\n ] = self.macro_atom_references[\"count_down\"]\n self.macro_atom_references.loc[\n :, \"block_references\"\n ] = np.hstack(\n (\n 0,\n np.cumsum(\n self.macro_atom_references[\"count_down\"].values[:-1]\n ),\n )\n )\n\n elif line_interaction_type == \"macroatom\":\n self.macro_atom_references.loc[\n :, \"block_references\"\n ] = np.hstack(\n (\n 0,\n np.cumsum(\n 
self.macro_atom_references[\"count_total\"].values[\n :-1\n ]\n ),\n )\n )\n\n self.macro_atom_references.loc[:, \"references_idx\"] = np.arange(\n len(self.macro_atom_references)\n )\n\n self.macro_atom_data.loc[:, \"lines_idx\"] = self.lines_index.loc[\n self.macro_atom_data[\"transition_line_id\"]\n ].values\n\n self.lines_upper2macro_reference_idx = (\n self.macro_atom_references.loc[\n tmp_lines_upper2level_idx, \"references_idx\"\n ]\n .astype(np.int64)\n .values\n )\n\n if line_interaction_type == \"macroatom\":\n # Sets all\n tmp_macro_destination_level_idx = pd.MultiIndex.from_arrays(\n [\n self.macro_atom_data[\"atomic_number\"],\n self.macro_atom_data[\"ion_number\"],\n self.macro_atom_data[\"destination_level_number\"],\n ]\n )\n\n tmp_macro_source_level_idx = pd.MultiIndex.from_arrays(\n [\n self.macro_atom_data[\"atomic_number\"],\n self.macro_atom_data[\"ion_number\"],\n self.macro_atom_data[\"source_level_number\"],\n ]\n )\n\n self.macro_atom_data.loc[:, \"destination_level_idx\"] = (\n self.macro_atom_references.loc[\n tmp_macro_destination_level_idx, \"references_idx\"\n ]\n .astype(np.int64)\n .values\n )\n\n self.macro_atom_data.loc[:, \"source_level_idx\"] = (\n self.macro_atom_references.loc[\n tmp_macro_source_level_idx, \"references_idx\"\n ]\n .astype(np.int64)\n .values\n )\n\n elif line_interaction_type == \"downbranch\":\n # Sets all the destination levels to -1 to indicate that they\n # are not used in downbranch calculations\n self.macro_atom_data.loc[:, \"destination_level_idx\"] = -1\n\n self.nlte_data = NLTEData(self, nlte_species)\n\n def _check_selected_atomic_numbers(self):\n selected_atomic_numbers = self.selected_atomic_numbers\n available_atomic_numbers = np.unique(\n self.ionization_data.index.get_level_values(0)\n )\n atomic_number_check = np.isin(\n selected_atomic_numbers, available_atomic_numbers\n )\n\n if not all(atomic_number_check):\n missing_atom_mask = np.logical_not(atomic_number_check)\n missing_atomic_numbers = selected_atomic_numbers[missing_atom_mask]\n missing_numbers_str = \",\".join(missing_atomic_numbers.astype(\"str\"))\n msg = \"For atomic numbers {} there is no atomic data.\".format(\n missing_numbers_str\n )\n raise AtomDataMissingError(msg)\n\n def __repr__(self):\n return \"<Atomic Data UUID={} MD5={} Lines={:d} Levels={:d}>\".format(\n self.uuid1,\n self.md5,\n self.lines.line_id.count(),\n self.levels.energy.count(),\n )\n\n\nclass NLTEData(object):\n def __init__(self, atom_data, nlte_species):\n self.atom_data = atom_data\n self.lines = atom_data.lines.reset_index()\n self.nlte_species = nlte_species\n\n if nlte_species:\n logger.info(\"Preparing the NLTE data\")\n self._init_indices()\n if atom_data.collision_data is not None:\n self._create_collision_coefficient_matrix()\n\n def _init_indices(self):\n self.lines_idx = {}\n self.lines_level_number_lower = {}\n self.lines_level_number_upper = {}\n self.A_uls = {}\n self.B_uls = {}\n self.B_lus = {}\n\n for species in self.nlte_species:\n lines_idx = np.where(\n (self.lines.atomic_number == species[0])\n & (self.lines.ion_number == species[1])\n )\n self.lines_idx[species] = lines_idx\n self.lines_level_number_lower[\n species\n ] = self.lines.level_number_lower.values[lines_idx].astype(int)\n self.lines_level_number_upper[\n species\n ] = self.lines.level_number_upper.values[lines_idx].astype(int)\n\n self.A_uls[species] = self.atom_data.lines.A_ul.values[lines_idx]\n self.B_uls[species] = self.atom_data.lines.B_ul.values[lines_idx]\n self.B_lus[species] = 
self.atom_data.lines.B_lu.values[lines_idx]\n\n def _create_collision_coefficient_matrix(self):\n self.C_ul_interpolator = {}\n self.delta_E_matrices = {}\n self.g_ratio_matrices = {}\n collision_group = self.atom_data.collision_data.groupby(\n level=[\"atomic_number\", \"ion_number\"]\n )\n for species in self.nlte_species:\n no_of_levels = self.atom_data.levels.loc[species].energy.count()\n C_ul_matrix = np.zeros(\n (\n no_of_levels,\n no_of_levels,\n len(self.atom_data.collision_data_temperatures),\n )\n )\n delta_E_matrix = np.zeros((no_of_levels, no_of_levels))\n g_ratio_matrix = np.zeros((no_of_levels, no_of_levels))\n\n for (\n (\n atomic_number,\n ion_number,\n level_number_lower,\n level_number_upper,\n ),\n line,\n ) in collision_group.get_group(species).iterrows():\n # line.columns : delta_e, g_ratio, temperatures ...\n C_ul_matrix[\n level_number_lower, level_number_upper, :\n ] = line.values[2:]\n delta_E_matrix[level_number_lower, level_number_upper] = line[\n \"delta_e\"\n ]\n # TODO TARDISATOMIC fix change the g_ratio to be the otherway round - I flip them now here.\n g_ratio_matrix[level_number_lower, level_number_upper] = (\n 1 / line[\"g_ratio\"]\n )\n self.C_ul_interpolator[species] = interpolate.interp1d(\n self.atom_data.collision_data_temperatures, C_ul_matrix\n )\n self.delta_E_matrices[species] = delta_E_matrix\n\n self.g_ratio_matrices[species] = g_ratio_matrix\n\n def get_collision_matrix(self, species, t_electrons):\n \"\"\"\n Creat collision matrix by interpolating the C_ul values for\n the desired temperatures.\n \"\"\"\n c_ul_matrix = self.C_ul_interpolator[species](t_electrons)\n no_of_levels = c_ul_matrix.shape[0]\n c_ul_matrix[np.isnan(c_ul_matrix)] = 0.0\n\n # TODO in tardisatomic the g_ratio is the other way round - here I'll flip it in prepare_collision matrix\n\n c_lu_matrix = (\n c_ul_matrix\n * np.exp(\n -self.delta_E_matrices[species].reshape(\n (no_of_levels, no_of_levels, 1)\n )\n / t_electrons.reshape((1, 1, t_electrons.shape[0]))\n )\n * self.g_ratio_matrices[species].reshape(\n (no_of_levels, no_of_levels, 1)\n )\n )\n return c_ul_matrix + c_lu_matrix.transpose(1, 0, 2)\n"
] |
[
[
"numpy.logical_not",
"numpy.isnan",
"numpy.cumsum",
"pandas.MultiIndex.from_arrays",
"scipy.interpolate.interp1d",
"pandas.HDFStore",
"numpy.zeros",
"numpy.where",
"numpy.isin"
]
] |
jamesetouma/manim
|
[
"8b1b30b75853446d0403df8abe67d0bd9c8db033",
"2834fbbba6dae88d64bd3ae46d9c8c6271eacfc7"
] |
[
"manim/utils/iterables.py",
"manim/utils/hashing.py"
] |
[
"__all__ = [\n \"remove_list_redundancies\",\n \"list_update\",\n \"list_difference_update\",\n \"all_elements_are_instances\",\n \"adjacent_n_tuples\",\n \"adjacent_pairs\",\n \"tuplify\",\n \"stretch_array_to_length\",\n \"make_even\",\n \"make_even_by_cycling\",\n \"remove_nones\",\n \"concatenate_lists\",\n]\n\n\nimport itertools as it\nimport numpy as np\n\n\ndef remove_list_redundancies(l):\n \"\"\"\n Used instead of list(set(l)) to maintain order\n Keeps the last occurance of each element\n \"\"\"\n reversed_result = []\n used = set()\n for x in reversed(l):\n if x not in used:\n reversed_result.append(x)\n used.add(x)\n reversed_result.reverse()\n return reversed_result\n\n\ndef list_update(l1, l2):\n \"\"\"\n Used instead of list(set(l1).update(l2)) to maintain order,\n making sure duplicates are removed from l1, not l2.\n \"\"\"\n return [e for e in l1 if e not in l2] + list(l2)\n\n\ndef list_difference_update(l1, l2):\n return [e for e in l1 if e not in l2]\n\n\ndef all_elements_are_instances(iterable, Class):\n return all([isinstance(e, Class) for e in iterable])\n\n\ndef adjacent_n_tuples(objects, n):\n return zip(*[[*objects[k:], *objects[:k]] for k in range(n)])\n\n\ndef adjacent_pairs(objects):\n return adjacent_n_tuples(objects, 2)\n\n\ndef tuplify(obj):\n if isinstance(obj, str):\n return (obj,)\n try:\n return tuple(obj)\n except TypeError:\n return (obj,)\n\n\ndef stretch_array_to_length(nparray, length):\n curr_len = len(nparray)\n if curr_len > length:\n raise Warning(\"Trying to stretch array to a length shorter than its own\")\n indices = np.arange(length) / float(length)\n indices *= curr_len\n return nparray[indices.astype(\"int\")]\n\n\ndef make_even(iterable_1, iterable_2):\n list_1, list_2 = list(iterable_1), list(iterable_2)\n length = max(len(list_1), len(list_2))\n return (\n [list_1[(n * len(list_1)) // length] for n in range(length)],\n [list_2[(n * len(list_2)) // length] for n in range(length)],\n )\n\n\ndef make_even_by_cycling(iterable_1, iterable_2):\n length = max(len(iterable_1), len(iterable_2))\n cycle1 = it.cycle(iterable_1)\n cycle2 = it.cycle(iterable_2)\n return (\n [next(cycle1) for x in range(length)],\n [next(cycle2) for x in range(length)],\n )\n\n\ndef remove_nones(sequence):\n return [x for x in sequence if x]\n\n\n# Note this is redundant with it.chain\n\n\ndef concatenate_lists(*list_of_lists):\n return [item for l in list_of_lists for item in l]\n",
"import json\nimport zlib\nimport inspect\nimport copy\nimport numpy as np\nfrom types import ModuleType, MappingProxyType, FunctionType, MethodType\nfrom time import perf_counter\n\nfrom .. import logger\n\nALREADY_PROCESSED_ID = {}\n\n\nclass CustomEncoder(json.JSONEncoder):\n def default(self, obj):\n \"\"\"\n This method is used to serialize objects to JSON format.\n\n If obj is a function, then it will return a dict with two keys : 'code', for the code source, and 'nonlocals' for all nonlocalsvalues. (including nonlocals functions, that will be serialized as this is recursive.)\n if obj is a np.darray, it converts it into a list.\n if obj is an object with __dict__ attribute, it returns its __dict__.\n Else, will let the JSONEncoder do the stuff, and throw an error if the type is not suitable for JSONEncoder.\n\n Parameters\n ----------\n obj : Any\n Arbitrary object to convert\n\n Returns\n -------\n Any\n Python object that JSON encoder will recognize\n\n \"\"\"\n if not (isinstance(obj, ModuleType)) and isinstance(\n obj, (MethodType, FunctionType)\n ):\n cvars = inspect.getclosurevars(obj)\n cvardict = {**copy.copy(cvars.globals), **copy.copy(cvars.nonlocals)}\n for i in list(cvardict):\n # NOTE : All module types objects are removed, because otherwise it throws ValueError: Circular reference detected if not. TODO\n if isinstance(cvardict[i], ModuleType):\n del cvardict[i]\n return self._check_iterable(\n {\"code\": inspect.getsource(obj), \"nonlocals\": cvardict}\n )\n elif isinstance(obj, np.ndarray):\n if obj.size > 1000:\n obj = np.resize(obj, (100, 100))\n return f\"TRUNCATED ARRAY: {repr(obj)}\"\n # We return the repr and not a list to avoid the JsonEncoder to iterate over it.\n return repr(obj)\n elif hasattr(obj, \"__dict__\"):\n temp = getattr(obj, \"__dict__\")\n # MappingProxy is scene-caching nightmare. It contains all of the object methods and attributes. 
We skip it as the mechanism will at some point process the object, but instancied\n # Indeed, there is certainly no case where scene-caching will recieve only a non instancied object, as this is never used in the library or encouraged to be used user-side.\n if isinstance(temp, MappingProxyType):\n return \"MappingProxy\"\n return self._check_iterable(temp)\n elif isinstance(obj, np.uint8):\n return int(obj)\n\n return f\"Unsupported type for serializing -> {str(type(obj))}\"\n\n def _handle_already_processed(self, obj):\n \"\"\"Handle if an object has been already processed by checking the id of the object.\n\n This prevents the mechanism to handle an object several times, and is used to prevent any circular reference.\n\n Parameters\n ----------\n obj : Any\n The obj to check.\n\n Returns\n -------\n Any\n \"already_processed\" string if it has been processed, otherwise obj.\n \"\"\"\n global ALREADY_PROCESSED_ID\n if id(obj) in ALREADY_PROCESSED_ID:\n return \"already_processed\"\n if not isinstance(obj, (str, int, bool, float)):\n ALREADY_PROCESSED_ID[id(obj)] = obj\n return obj\n\n def _check_iterable(self, iterable):\n \"\"\"Check for circular reference at each iterable that will go through the JSONEncoder, as well as key of the wrong format.\n\n If a key with a bad format is found (i.e not a int, string, or float), it gets replaced byt its hash using the same process implemented here.\n If a circular reference is found within the iterable, it will be replaced by the string \"already processed\".\n\n Parameters\n ----------\n iterable : Iterable[Any]\n The iterable to check.\n \"\"\"\n\n def _key_to_hash(key):\n return zlib.crc32(json.dumps(key, cls=CustomEncoder).encode())\n\n def _iter_check_list(lst):\n # We have to make a copy, as we don't want to touch to the original list\n # A deepcopy isn't necessary as it is already recursive.\n lst_copy = copy.copy(lst)\n for i, el in enumerate(lst):\n if not isinstance(lst, tuple):\n lst_copy[i] = self._handle_already_processed(\n el\n ) # ISSUE here, because of copy.\n if isinstance(el, (list, tuple)):\n lst_copy[i] = _iter_check_list(el)\n elif isinstance(el, dict):\n lst_copy[i] = _iter_check_dict(el)\n return lst_copy\n\n def _iter_check_dict(dct):\n # We have to make a copy, as we don't want to touch to the original dict\n # A deepcopy isn't necessary as it is already recursive.\n dct_copy = copy.copy(dct)\n for k, v in dct.items():\n dct_copy[k] = self._handle_already_processed(v)\n # We check if the k is of the right format (supporter by Json)\n if not isinstance(k, (str, int, float, bool)) and k is not None:\n k_new = _key_to_hash(k)\n # We delete the value coupled with the old key, as the value is now coupled with the new key.\n dct_copy[k_new] = dct_copy[k]\n del dct_copy[k]\n else:\n k_new = k\n if isinstance(v, dict):\n dct_copy[k_new] = _iter_check_dict(v)\n elif isinstance(v, (list, tuple)):\n dct_copy[k_new] = _iter_check_list(v)\n return dct_copy\n\n if isinstance(iterable, (list, tuple)):\n return _iter_check_list(iterable)\n elif isinstance(iterable, dict):\n return _iter_check_dict(iterable)\n\n def encode(self, obj):\n \"\"\"Overriding of :meth:`JSONEncoder.encode`, to make our own process.\n\n Parameters\n ----------\n obj: Any\n The object to encode in JSON.\n\n Returns\n -------\n :class:`str`\n The object encoder with the standard json process.\n \"\"\"\n # We need to mark as already processed the first object to go in the process,\n # As after, only objects that come from iterables will be marked as such.\n global 
ALREADY_PROCESSED_ID\n ALREADY_PROCESSED_ID[id(obj)] = obj\n if isinstance(obj, (dict, list, tuple)):\n return super().encode(self._check_iterable(obj))\n return super().encode(obj)\n\n\ndef get_json(obj):\n \"\"\"Recursively serialize `object` to JSON using the :class:`CustomEncoder` class.\n\n Paramaters\n ----------\n dict_config : :class:`dict`\n The dict to flatten\n\n Returns\n -------\n :class:`str`\n The flattened object\n \"\"\"\n return json.dumps(obj, cls=CustomEncoder)\n\n\ndef get_camera_dict_for_hashing(camera_object):\n \"\"\"Remove some keys from `camera_object.__dict__` that are very heavy and useless for the caching functionality.\n\n Parameters\n ----------\n camera_object : :class:`~.Camera`\n The camera object used in the scene\n\n Returns\n -------\n :class:`dict`\n `Camera.__dict__` but cleaned.\n \"\"\"\n camera_object_dict = copy.copy(camera_object.__dict__)\n # We have to clean a little bit of camera_dict, as pixel_array and background are two very big numpy arrays.\n # They are not essential to caching process.\n # We also have to remove pixel_array_to_cairo_context as it contains used memory adress (set randomly). See l.516 get_cached_cairo_context in camera.py\n for to_clean in [\"background\", \"pixel_array\", \"pixel_array_to_cairo_context\"]:\n camera_object_dict.pop(to_clean, None)\n return camera_object_dict\n\n\ndef get_hash_from_play_call(\n scene_object, camera_object, animations_list, current_mobjects_list\n):\n \"\"\"Take the list of animations and a list of mobjects and output their hashes. This is meant to be used for `scene.play` function.\n\n Parameters\n -----------\n scene_object : :class:`~.Scene`\n The scene object.\n\n camera_object : :class:`~.Camera`\n The camera object used in the scene.\n\n animations_list : Iterable[:class:`~.Animation`]\n The list of animations.\n\n current_mobjects_list : Iterable[:class:`~.Mobject`]\n The list of mobjects.\n\n Returns\n -------\n :class:`str`\n A string concatenation of the respective hashes of `camera_object`, `animations_list` and `current_mobjects_list`, separated by `_`.\n \"\"\"\n logger.debug(\"Hashing ...\")\n global ALREADY_PROCESSED_ID\n # We add the scene object within the ALREADY_PROCESSED_ID, as we don't want to process because pretty much all of its attributes will be soon or later processed (in one of the three hashes).\n ALREADY_PROCESSED_ID = {id(scene_object): scene_object}\n t_start = perf_counter()\n camera_json = get_json(get_camera_dict_for_hashing(camera_object))\n animations_list_json = [get_json(x) for x in sorted(animations_list, key=str)]\n current_mobjects_list_json = [\n get_json(x) for x in sorted(current_mobjects_list, key=str)\n ]\n hash_camera, hash_animations, hash_current_mobjects = [\n zlib.crc32(repr(json_val).encode())\n for json_val in [camera_json, animations_list_json, current_mobjects_list_json]\n ]\n t_end = perf_counter()\n logger.debug(\"Hashing done in {:.5f} s.\".format(t_end - t_start))\n # This will reset ALREADY_PROCESSED_ID as all the hashing processus is finished.\n ALREADY_PROCESSED_ID = {}\n return \"{}_{}_{}\".format(hash_camera, hash_animations, hash_current_mobjects)\n\n\ndef get_hash_from_wait_call(\n scene_object,\n camera_object,\n wait_time,\n stop_condition_function,\n current_mobjects_list,\n):\n \"\"\"Take a wait time, a boolean function as a stop condition and a list of mobjects, and then output their individual hashes. 
This is meant to be used for `scene.wait` function.\n\n Parameters\n -----------\n scene_object : :class:`~.Scene`\n The scene object.\n camera_object : :class:`~.Camera`\n The camera object.\n wait_time : :class:`float`\n The time to wait\n stop_condition_function : Callable[[...], bool]\n Boolean function used as a stop_condition in `wait`.\n\n Returns\n -------\n :class:`str`\n A concatenation of the respective hashes of `animations_list and `current_mobjects_list`, separated by `_`.\n \"\"\"\n logger.debug(\"Hashing ...\")\n t_start = perf_counter()\n global ALREADY_PROCESSED_ID\n # We add the scene object within the ALREADY_PROCESSED_ID, as we don't want to process because pretty much all of its attributes will be soon or later processed (in one of the three hashes).\n ALREADY_PROCESSED_ID = {id(scene_object): scene_object}\n camera_json = get_json(get_camera_dict_for_hashing(camera_object))\n current_mobjects_list_json = [\n get_json(x) for x in sorted(current_mobjects_list, key=str)\n ]\n hash_current_mobjects = zlib.crc32(repr(current_mobjects_list_json).encode())\n hash_camera = zlib.crc32(repr(camera_json).encode())\n if stop_condition_function is not None:\n hash_function = zlib.crc32(get_json(stop_condition_function).encode())\n # This will reset ALREADY_PROCESSED_ID as all the hashing processus is finished.\n ALREADY_PROCESSED_ID = {}\n t_end = perf_counter()\n logger.debug(\"Hashing done in {:.5f} s.\".format(t_end - t_start))\n return \"{}_{}{}_{}\".format(\n hash_camera,\n str(wait_time).replace(\".\", \"-\"),\n hash_function,\n hash_current_mobjects,\n )\n ALREADY_PROCESSED_ID = {}\n t_end = perf_counter()\n logger.debug(\"Hashing done in {:.5f} s.\".format(t_end - t_start))\n return \"{}_{}_{}\".format(\n hash_camera, str(wait_time).replace(\".\", \"-\"), hash_current_mobjects\n )\n"
] |
[
[
"numpy.arange"
],
[
"numpy.resize"
]
] |
guillemcortes/FBK-Fairseq-ST
|
[
"66b4d0ff8b211750c8c955c3d3f277f41713996e"
] |
[
"fairseq/modules/multihead_attention.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nimport math\n\nfrom fairseq import utils\n\n\nclass MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.in_proj_weight)\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n def forward(self, query, key, value, key_padding_mask=None, incremental_state=None,\n need_weights=True, static_kv=False, attn_mask=None):\n \"\"\"Input shape: Time x Batch x Channel\n\n Self-attention can be implemented by passing in the same arguments for\n query, key and value. Timesteps can be masked by supplying a T x T mask in the\n `attn_mask` argument. 
Padding elements can be excluded from\n the key by passing a binary ByteTensor (`key_padding_mask`) with shape:\n batch x src_len, where padding elements are indicated by 1s.\n \"\"\"\n\n qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()\n kv_same = key.data_ptr() == value.data_ptr()\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n assert key.size() == value.size()\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if 'prev_key' in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert kv_same and not qkv_same\n key = value = None\n else:\n saved_state = None\n\n if qkv_same:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif kv_same:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k, v = self.in_proj_kv(key)\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n q *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)\n\n q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if k is not None:\n k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n if v is not None:\n v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)\n\n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if 'prev_key' in saved_state:\n prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n k = torch.cat((prev_key, k), dim=1)\n if 'prev_value' in saved_state:\n prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n v = torch.cat((prev_value, v), dim=1)\n saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)\n\n self._set_input_buffer(incremental_state, saved_state)\n\n src_len = k.size(1)\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # 
don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n if self.onnx_trace:\n attn_weights = torch.where(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n torch.Tensor([float(\"-Inf\")]),\n attn_weights.float()\n ).type_as(attn_weights)\n else:\n attn_weights = attn_weights.float().masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n float('-inf'),\n ).type_as(attn_weights) # FP16 support: cast to float and back\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)\n attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn = torch.bmm(attn_weights, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n\n if (self.onnx_trace and attn.size(1) == 1):\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n\n if need_weights:\n # average attention weights over heads\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.sum(dim=1) / self.num_heads\n else:\n attn_weights = None\n\n return attn, attn_weights\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=-1)\n\n def in_proj_kv(self, key):\n return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)\n\n def in_proj_q(self, query):\n return self._in_proj(query, end=self.embed_dim)\n\n def in_proj_k(self, key):\n return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)\n\n def in_proj_v(self, value):\n return self._in_proj(value, start=2 * self.embed_dim)\n\n def _in_proj(self, input, start=0, end=None):\n weight = self.in_proj_weight\n bias = self.in_proj_bias\n weight = weight[start:end, :]\n if bias is not None:\n bias = bias[start:end]\n return F.linear(input, weight, bias)\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer[k] = input_buffer[k].index_select(0, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n ) or {}\n\n def _set_input_buffer(self, incremental_state, buffer):\n utils.set_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n buffer,\n )\n\ndef gaussian(matrix, batch_size=1, variance=1e-6):\n num = (matrix * matrix).unsqueeze(0)\n denom = (2 * variance * variance).unsqueeze(1).unsqueeze(2).repeat(batch_size, 1, 1)\n return num / denom\n\ndef log(matrix, batch_size=1, unused=None):\n return torch.max(torch.zeros_like(matrix), torch.log(matrix))\n\nclass LocalMultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n Penalties = {'log': log,\n 'gauss': gaussian}\n\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True, penalty='log', **kwargs):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert 
self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim**-0.5\n self._mask = None\n\n self.in_proj_weight = Parameter(torch.Tensor(3*embed_dim, embed_dim))\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3*embed_dim))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.penalty = type(self).Penalties[penalty]\n\n if penalty == 'gauss':\n self.vars = Parameter(torch.Tensor(self.num_heads).fill_(kwargs['init_variance']))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.in_proj_weight)\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n\n def forward(self, query, key, value, mask_future_timesteps=False,\n key_padding_mask=None, incremental_state=None,\n need_weights=True, static_kv=False):\n \"\"\"Input shape: Time x Batch x Channel\n\n Self-attention can be implemented by passing in the same arguments for\n query, key and value. Future timesteps can be masked with the\n `mask_future_timesteps` argument. Padding elements can be excluded from\n the key by passing a binary ByteTensor (`key_padding_mask`) with shape:\n batch x src_len, where padding elements are indicated by 1s.\n \"\"\"\n\n qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()\n kv_same = key.data_ptr() == value.data_ptr()\n\n tgt_len, bsz, embed_dim = query.size()\n if embed_dim != self.embed_dim:\n print(\"| x: {}, multi_head: {}\".format(embed_dim, self.embed_dim))\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n assert key.size() == value.size()\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if 'prev_key' in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert kv_same and not qkv_same\n key = value = None\n else:\n saved_state = None\n\n if qkv_same:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif kv_same:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n if key is None:\n assert value is None\n # this will allow us to concat it with previous value and get\n # just get the previous value\n k = v = q.new(0)\n else:\n k, v = self.in_proj_kv(key)\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n q *= self.scaling\n\n if saved_state is not None:\n if 'prev_key' in saved_state:\n k = torch.cat((saved_state['prev_key'], k), dim=0)\n if 'prev_value' in saved_state:\n v = torch.cat((saved_state['prev_value'], v), dim=0)\n saved_state['prev_key'] = k\n saved_state['prev_value'] = v\n self._set_input_buffer(incremental_state, saved_state)\n\n src_len = k.size(0)\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n q = q.contiguous().view(tgt_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)\n k = k.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)\n v = v.contiguous().view(src_len, bsz*self.num_heads, self.head_dim).transpose(0, 1)\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n # only apply masking at training time (when incremental state is None)\n if mask_future_timesteps and 
incremental_state is None:\n assert query.size() == key.size(), \\\n 'mask_future_timesteps only applies to self-attention'\n attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.float().masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n float('-inf'),\n ).type_as(attn_weights) # FP16 support: cast to float and back\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n # Local attention\n pos_diff = torch.abs(torch.arange(tgt_len).unsqueeze(1) - torch.arange(src_len).unsqueeze(0)).float().cuda()\n variance = self.vars if hasattr(self, 'vars') else None\n local_mask = self.penalty(pos_diff, bsz, variance)\n attn_weights = attn_weights - local_mask\n\n attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)\n attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn = torch.bmm(attn_weights, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n\n if need_weights:\n # average attention weights over heads\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.sum(dim=1) / self.num_heads\n else:\n attn_weights = None\n\n return attn, attn_weights\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=-1)\n\n def in_proj_kv(self, key):\n return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)\n\n def in_proj_q(self, query):\n return self._in_proj(query, end=self.embed_dim)\n\n def in_proj_k(self, key):\n return self._in_proj(key, start=self.embed_dim, end=2*self.embed_dim)\n\n def in_proj_v(self, value):\n return self._in_proj(value, start=2*self.embed_dim)\n\n def _in_proj(self, input, start=None, end=None):\n weight = self.in_proj_weight\n bias = self.in_proj_bias\n if end is not None:\n weight = weight[:end, :]\n if bias is not None:\n bias = bias[:end]\n if start is not None:\n weight = weight[start:, :]\n if bias is not None:\n bias = bias[start:]\n return F.linear(input, weight, bias)\n\n def buffered_mask(self, tensor):\n dim = tensor.size(-1)\n if self._mask is None:\n self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._mask.size(0) < dim:\n self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)\n return self._mask[:dim, :dim]\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer[k] = input_buffer[k].index_select(1, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n ) or {}\n\n def _set_input_buffer(self, incremental_state, buffer):\n utils.set_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n buffer,\n )\n\n\n\nclass ConvAttention2D(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(self, embed_dim, num_heads, dropout=0., bias=True):\n super().__init__()\n self.embed_dim = embed_dim\n 
self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim # // num_heads\n # assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n self._mask = None\n\n self.in_proj_weight = Parameter(torch.Tensor(3 * num_heads, embed_dim, 3, 3))\n if bias:\n self.in_proj_bias = Parameter(torch.Tensor(3 * num_heads))\n else:\n self.register_parameter('in_proj_bias', None)\n self.out_proj = nn.Conv2d(2 * self.num_heads, embed_dim, 3, padding=1, bias=bias)\n self.bn_q = BatchNorm(self.num_heads)\n self.bn_k = BatchNorm(self.num_heads)\n self.bn_v = BatchNorm(self.num_heads)\n self.bn_out = BatchNorm(embed_dim)\n self.relu = F.relu\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.xavier_uniform_(self.in_proj_weight)\n nn.init.xavier_uniform_(self.out_proj.weight)\n if self.in_proj_bias is not None:\n nn.init.constant_(self.in_proj_bias, 0.)\n nn.init.constant_(self.out_proj.bias, 0.)\n\n def forward(self, query, key, value, mask_future_timesteps=False,\n key_padding_mask=None, incremental_state=None,\n need_weights=True, static_kv=False):\n \"\"\"Input shape: Time x Batch x Channel\n\n Self-attention can be implemented by passing in the same arguments for\n query, key and value. Future timesteps can be masked with the\n `mask_future_timesteps` argument. Padding elements can be excluded from\n the key by passing a binary ByteTensor (`key_padding_mask`) with shape:\n batch x src_len, where padding elements are indicated by 1s.\n \"\"\"\n\n qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()\n kv_same = key.data_ptr() == value.data_ptr()\n\n bsz, channels, tgt_len, embed_dim = query.size()\n assert key.size() == value.size()\n\n if qkv_same:\n # self-attention\n q, k, v = self.in_proj_qkv(query)\n elif kv_same:\n # encoder-decoder attention\n q = self.in_proj_q(query)\n if key is None:\n assert value is None\n # this will allow us to concat it with previous value and get\n # just get the previous value\n k = v = q.new(0)\n else:\n k, v = self.in_proj_kv(key)\n else:\n q = self.in_proj_q(query)\n k = self.in_proj_k(key)\n v = self.in_proj_v(value)\n q *= self.scaling\n\n src_len = k.size(2)\n freq_len = k.size(3)\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n q = self.relu(self.bn_q(q.contiguous()).view(bsz * self.num_heads, tgt_len, freq_len))\n k = self.relu(self.bn_k(k.contiguous()).view(bsz * self.num_heads, src_len, freq_len))\n v = self.relu(self.bn_v(v.contiguous()).view(bsz * self.num_heads, src_len, freq_len))\n\n attn_weights_t = torch.bmm(q, k.transpose(1, 2))\n assert list(attn_weights_t.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n # only apply masking at training time (when incremental state is None)\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights_t = attn_weights_t.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights_t = attn_weights_t.float().masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2),\n float('-inf'),\n ).type_as(attn_weights_t) # FP16 support: cast to float and back\n attn_weights_t = attn_weights_t.view(bsz * self.num_heads, tgt_len, src_len)\n attn_weights_t = F.softmax(attn_weights_t.float(), dim=-1).type_as(attn_weights_t)\n attn_weights_t = F.dropout(attn_weights_t, p=self.dropout, training=self.training)\n\n attn_t = torch.bmm(attn_weights_t, v)\n assert list(attn_t.size()) == [bsz * self.num_heads, 
tgt_len, freq_len]\n q_f = q.transpose(2, 1)\n v_f = v.transpose(2, 1)\n\n attn_weights_f = torch.bmm(q_f, k)\n assert list(attn_weights_f.size()) == [bsz * self.num_heads, freq_len, freq_len]\n\n # only apply masking at training time (when incremental state is None)\n attn_weights_f = F.softmax(attn_weights_f.float(), dim=-1).type_as(attn_weights_f)\n attn_weights_f = F.dropout(attn_weights_f, p=self.dropout, training=self.training)\n\n attn_f = torch.bmm(attn_weights_f, v_f)\n assert list(attn_f.size()) == [bsz * self.num_heads, freq_len, tgt_len]\n attn_t = attn_t.view(bsz, self.num_heads, tgt_len, freq_len).contiguous()\n attn_f = attn_f.transpose(1, 2).view(bsz, self.num_heads, tgt_len, freq_len).contiguous()\n attn = torch.cat([attn_t, attn_f], dim=1).contiguous()\n attn = self.relu(self.bn_out(self.out_proj(attn)))\n\n if need_weights:\n # average attention weights over heads\n attn_weights_t = attn_weights_t.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights_t = attn_weights_t.sum(dim=1) / self.num_heads\n else:\n attn_weights_t = None\n\n return attn, attn_weights_t\n\n def in_proj_qkv(self, query):\n return self._in_proj(query).chunk(3, dim=1)\n\n def in_proj_kv(self, key):\n return self._in_proj(key, start=1).chunk(2, dim=1)\n\n def in_proj_q(self, query):\n return self._in_proj(query, end=1)\n\n def in_proj_k(self, key):\n return self._in_proj(key, start=1, end=2)\n\n def in_proj_v(self, value):\n return self._in_proj(value, start=2)\n\n def _in_proj(self, input, start=None, end=None):\n weight = self.in_proj_weight\n bias = self.in_proj_bias\n if end is not None:\n weight = weight[:end, :, :, :]\n if bias is not None:\n bias = bias[:end]\n if start is not None:\n weight = weight[start:, :, :, :]\n if bias is not None:\n bias = bias[start:]\n return F.conv2d(input, weight, bias=bias, padding=1)\n\n def buffered_mask(self, tensor):\n dim = tensor.size(-1)\n if self._mask is None:\n self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)\n if self._mask.size(0) < dim:\n self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)\n return self._mask[:dim, :dim]\n\n def reorder_incremental_state(self, incremental_state, new_order):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer[k] = input_buffer[k].index_select(1, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n def _get_input_buffer(self, incremental_state):\n return utils.get_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n ) or {}\n\n def _set_input_buffer(self, incremental_state, buffer):\n utils.set_incremental_state(\n self,\n incremental_state,\n 'attn_state',\n buffer,\n )\n\ndef LayerNorm(embedding_dim):\n m = nn.LayerNorm(embedding_dim)\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n return m\n\n\ndef BatchNorm(embedding_dim):\n m = nn.BatchNorm2d(embedding_dim)\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n return m\n"
] |
[
[
"torch.Tensor",
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"torch.cat",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.init.xavier_normal_",
"torch.arange",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.log",
"torch.bmm",
"torch.nn.BatchNorm2d",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.linear"
]
] |
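The attention module in the code cell above applies attention twice over the same projected tensors: once along the time axis and once along the frequency axis, then recombines the two results across heads before the output convolution. A minimal sketch of that two-axis pattern, with hypothetical shapes and random tensors standing in for the projected `q`, `k`, `v` (scaling, key-padding masks and dropout omitted):

```python
import torch
import torch.nn.functional as F

# Hypothetical shapes: (batch * num_heads, time, freq), as after the BatchNorm/view step above.
bh, tgt_len, freq_len = 8, 10, 16
q = torch.randn(bh, tgt_len, freq_len)
k = torch.randn(bh, tgt_len, freq_len)
v = torch.randn(bh, tgt_len, freq_len)

# Time-axis attention: (time x time) scores, applied to v along the time dimension.
attn_t = F.softmax(torch.bmm(q, k.transpose(1, 2)), dim=-1)   # (bh, T, T)
out_t = torch.bmm(attn_t, v)                                   # (bh, T, F)

# Frequency-axis attention: transpose so frequency is the sequence dimension.
attn_f = F.softmax(torch.bmm(q.transpose(1, 2), k), dim=-1)    # (bh, F, F)
out_f = torch.bmm(attn_f, v.transpose(1, 2)).transpose(1, 2)   # back to (bh, T, F)

combined = torch.stack([out_t, out_f])  # the module instead reshapes and concatenates over heads
print(combined.shape)                   # torch.Size([2, 8, 10, 16])
```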
sopanpatil/exp-hydro
|
[
"7295dddc4df1028f669a223e1b631a4a91669515"
] |
[
"exphydro/distributed/type1/ExphydroDistrModel.py"
] |
[
"#!/usr/bin/env python\n\n# Programmer(s): Sopan Patil.\n# This file is part of the 'exphydro.distributed.type1' package.\n\nimport numpy\nfrom exphydro.lumped import ExphydroModel\n\n######################################################################\n\n\nclass ExphydroDistrModel(object):\n\n def __init__(self, p, pet, t, npixels):\n\n \"\"\" This method is used to initialise, i.e., create an instance of the ExphydroDistrModel class.\n\n Syntax: ExphydroDistrModel(p, pet, t, npixels)\n\n Args:\n (1) p: Daily precipitation time-series (mm/day)\n (2) pet: Daily potential evapotranspiration time-series (mm/day)\n (3) t: Daily mean air temperature time-series (deg C)\n (4) npixels: Number of pixels in the catchment\n\n \"\"\"\n\n # The statement below creates npixel instances of the lumped EXP-HYDRO model\n self.model = [ExphydroModel(p, pet, t) for j in range(npixels)]\n\n self.timespan = p.shape[0] # Time length of the simulation period\n self.qsimtmp = numpy.zeros(self.timespan) # Variable to temporarily store pixel's streamflow output\n self.qsim = numpy.zeros(self.timespan) # Simulated streamflow (mm/day)\n\n # ----------------------------------------------------------------\n\n def simulate(self, para):\n\n \"\"\" This method simulates the EXP-HYDRO model over all pixels\n and provides a combined streamflow output.\n \"\"\"\n\n npixels = para.pixels\n\n # In the for loop below, each instance of lumped EXP-HYDRO model\n # is run npixel times.\n for i in range(npixels):\n self.qsimtmp = self.model[i].simulate(para.params[i])\n\n # Averaging the Q output of all pixels\n if i == 0:\n self.qsim = self.qsimtmp\n else:\n self.qsim = (self.qsim + self.qsimtmp)*0.5\n\n return self.qsim\n\n######################################################################\n"
] |
[
[
"numpy.zeros"
]
] |
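In `ExphydroDistrModel.simulate` above, the per-pixel streamflows are folded in pairwise (`qsim = (qsim + qsimtmp) * 0.5`), so pixels that appear later in the loop end up weighted more heavily than earlier ones (for n pixels the last contributes 1/2, the one before it 1/4, and so on) rather than entering an equal-weight mean. A small illustration with made-up constant series (the numbers below are hypothetical, not model output):

```python
import numpy as np

# Hypothetical per-pixel streamflow series.
q = [np.full(3, 1.0), np.full(3, 2.0), np.full(3, 4.0)]

# Pairwise folding, as in the simulate() loop above.
qsim = q[0]
for qpix in q[1:]:
    qsim = (qsim + qpix) * 0.5

print(qsim)                # [2.75 2.75 2.75] -> implied weights 1/4, 1/4, 1/2
print(np.mean(q, axis=0))  # [2.333...]       -> equal-weight mean, for comparison
```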
Sanger2000/transformers
|
[
"5de2046e12cffa5a0381219363cd521e8fe1a2bb"
] |
[
"src/transformers/trainer.py"
] |
[
"# coding=utf-8\n# Copyright 2020-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.\n\"\"\"\n\nimport contextlib\nimport inspect\nimport math\nimport os\nimport random\nimport re\nimport shutil\nimport sys\nimport time\nimport warnings\nfrom collections.abc import Mapping\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nfrom tqdm.auto import tqdm\n\n\n# Integrations must be imported before ML frameworks:\nfrom .integrations import ( # isort: split\n default_hp_search_backend,\n get_reporting_integration_callbacks,\n hp_params,\n is_fairscale_available,\n is_optuna_available,\n is_ray_tune_available,\n is_sigopt_available,\n is_wandb_available,\n run_hp_search_optuna,\n run_hp_search_ray,\n run_hp_search_sigopt,\n run_hp_search_wandb,\n)\n\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom huggingface_hub import Repository\n\nfrom . import __version__\nfrom .configuration_utils import PretrainedConfig\nfrom .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator\nfrom .debug_utils import DebugOption, DebugUnderflowOverflow\nfrom .deepspeed import deepspeed_init, deepspeed_reinit, is_deepspeed_zero3_enabled\nfrom .dependency_versions_check import dep_version_check\nfrom .file_utils import (\n CONFIG_NAME,\n WEIGHTS_NAME,\n get_full_repo_name,\n is_apex_available,\n is_datasets_available,\n is_in_notebook,\n is_sagemaker_dp_enabled,\n is_sagemaker_mp_enabled,\n is_torch_tpu_available,\n)\nfrom .modelcard import TrainingSummary\nfrom .modeling_utils import PreTrainedModel, unwrap_model\nfrom .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES\nfrom .optimization import Adafactor, get_scheduler\nfrom .tokenization_utils_base import PreTrainedTokenizerBase\nfrom .trainer_callback import (\n CallbackHandler,\n DefaultFlowCallback,\n PrinterCallback,\n ProgressCallback,\n TrainerCallback,\n TrainerControl,\n TrainerState,\n)\nfrom .trainer_pt_utils import (\n DistributedLengthGroupedSampler,\n DistributedSamplerWithLoop,\n DistributedTensorGatherer,\n IterableDatasetShard,\n LabelSmoother,\n LengthGroupedSampler,\n SequentialDistributedSampler,\n ShardSampler,\n distributed_broadcast_scalars,\n distributed_concat,\n find_batch_size,\n get_parameter_names,\n nested_concat,\n nested_detach,\n nested_numpify,\n nested_truncate,\n nested_xla_mesh_reduce,\n reissue_pt_warnings,\n)\nfrom .trainer_utils import (\n PREFIX_CHECKPOINT_DIR,\n BestRun,\n EvalLoopOutput,\n EvalPrediction,\n HPSearchBackend,\n HubStrategy,\n IntervalStrategy,\n PredictionOutput,\n ShardedDDPOption,\n TrainerMemoryTracker,\n TrainOutput,\n default_compute_objective,\n default_hp_space,\n 
denumpify_detensorize,\n get_last_checkpoint,\n has_length,\n number_of_arguments,\n set_seed,\n speed_metrics,\n)\nfrom .training_args import OptimizerNames, ParallelMode, TrainingArguments\nfrom .utils import logging\n\n\n_is_torch_generator_available = False\n_is_native_amp_available = False\n\nDEFAULT_CALLBACKS = [DefaultFlowCallback]\nDEFAULT_PROGRESS_CALLBACK = ProgressCallback\n\nif is_in_notebook():\n from .utils.notebook import NotebookProgressCallback\n\n DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback\n\nif is_apex_available():\n from apex import amp\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_torch_generator_available = True\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\nif is_datasets_available():\n import datasets\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n import torch_xla.distributed.parallel_loader as pl\n\nif is_fairscale_available():\n dep_version_check(\"fairscale\")\n import fairscale\n from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP\n from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\n from fairscale.nn.wrap import auto_wrap\n from fairscale.optim import OSS\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\nif is_sagemaker_dp_enabled():\n import smdistributed.dataparallel.torch.distributed as dist\n from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP\nelse:\n import torch.distributed as dist\n\nif is_sagemaker_mp_enabled():\n import smdistributed.modelparallel.torch as smp\n\n from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat\n\n\nif TYPE_CHECKING:\n import optuna\n\nlogger = logging.get_logger(__name__)\n\n\n# Name of the files used for checkpointing\nTRAINING_ARGS_NAME = \"training_args.bin\"\nTRAINER_STATE_NAME = \"trainer_state.json\"\nOPTIMIZER_NAME = \"optimizer.pt\"\nSCHEDULER_NAME = \"scheduler.pt\"\nSCALER_NAME = \"scaler.pt\"\n\n\nclass Trainer:\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.\n\n Args:\n model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):\n The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.\n\n <Tip>\n\n [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use\n your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers\n models.\n\n </Tip>\n\n args ([`TrainingArguments`], *optional*):\n The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the\n `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.\n data_collator (`DataCollator`, *optional*):\n The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will\n default to [`default_data_collator`] if no `tokenizer` is provided, an instance of\n [`DataCollatorWithPadding`] otherwise.\n train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):\n The dataset to use for training. 
If it is an `datasets.Dataset`, columns not accepted by the\n `model.forward()` method are automatically removed.\n\n Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a\n distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a\n `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will\n manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally\n sets the seed of the RNGs used.\n eval_dataset (`torch.utils.data.Dataset`, *optional*):\n The dataset to use for evaluation. If it is an `datasets.Dataset`, columns not accepted by the\n `model.forward()` method are automatically removed.\n tokenizer ([`PreTrainedTokenizerBase`], *optional*):\n The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the\n maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an\n interrupted training or reuse the fine-tuned model.\n model_init (`Callable[[], PreTrainedModel]`, *optional*):\n A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start\n from a new instance of the model as given by this function.\n\n The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to\n be able to choose different architectures according to hyper parameters (such as layer count, sizes of\n inner layers, dropout probabilities etc).\n compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):\n The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return\n a dictionary string to metric values.\n callbacks (List of [`TrainerCallback`], *optional*):\n A list of callbacks to customize the training loop. Will add those to the list of default callbacks\n detailed in [here](callback).\n\n If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.\n optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple\n containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model\n and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.\n preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):\n A function that preprocess the logits right before caching them at each evaluation step. Must take two\n tensors, the logits and the labels, and return the logits once processed as desired. The modifications made\n by this function will be reflected in the predictions received by `compute_metrics`.\n\n Note that the labels (second parameter) will be `None` if the dataset does not have them.\n\n Important attributes:\n\n - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]\n subclass.\n - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the\n original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,\n the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. 
If the inner\n model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.\n - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from\n data parallelism, this means some of the model layers are split on different GPUs).\n - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set\n to `False` if model parallel or deepspeed is used, or if the default\n `TrainingArguments.place_model_on_device` is overridden to return `False` .\n - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while\n in `train`)\n\n \"\"\"\n\n from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state\n\n def __init__(\n self,\n model: Union[PreTrainedModel, nn.Module] = None,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Dataset] = None,\n tokenizer: Optional[PreTrainedTokenizerBase] = None,\n model_init: Callable[[], PreTrainedModel] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),\n preprocess_logits_for_metrics: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,\n ):\n if args is None:\n output_dir = \"tmp_trainer\"\n logger.info(f\"No `TrainingArguments` passed, using `output_dir={output_dir}`.\")\n args = TrainingArguments(output_dir=output_dir)\n self.args = args\n # Seed must be set before instantiating the model when using model\n set_seed(self.args.seed)\n self.hp_name = None\n self.deepspeed = None\n self.is_in_train = False\n\n # memory metrics - must set up as early as possible\n self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)\n self._memory_tracker.start()\n\n # set the correct log level depending on the node\n log_level = args.get_process_log_level()\n logging.set_verbosity(log_level)\n\n # force device and distributed setup init explicitly\n args._setup_devices\n\n if model is None:\n if model_init is not None:\n self.model_init = model_init\n model = self.call_model_init()\n else:\n raise RuntimeError(\"`Trainer` requires either a `model` or `model_init` argument\")\n else:\n if model_init is not None:\n warnings.warn(\n \"`Trainer` requires either a `model` or `model_init` argument, but not both. \"\n \"`model_init` will overwrite your model when calling the `train` method. 
This will become a fatal error in the next release.\",\n FutureWarning,\n )\n self.model_init = model_init\n\n if hasattr(model, \"is_parallelizable\") and model.is_parallelizable and model.model_parallel:\n self.is_model_parallel = True\n else:\n self.is_model_parallel = False\n\n # Setup Sharded DDP training\n self.sharded_ddp = None\n if len(args.sharded_ddp) > 0:\n if args.deepspeed:\n raise ValueError(\n \"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags.\"\n )\n\n if args.local_rank == -1:\n raise ValueError(\"Using sharded DDP only works in distributed training.\")\n elif not is_fairscale_available():\n raise ImportError(\"Sharded DDP training requires fairscale: `pip install fairscale`.\")\n elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:\n raise ImportError(\n \"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found \"\n f\"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`.\"\n )\n elif ShardedDDPOption.SIMPLE in args.sharded_ddp:\n self.sharded_ddp = ShardedDDPOption.SIMPLE\n elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:\n self.sharded_ddp = ShardedDDPOption.ZERO_DP_2\n elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:\n self.sharded_ddp = ShardedDDPOption.ZERO_DP_3\n\n # one place to sort out whether to place the model on device or not\n # postpone switching model to cuda when:\n # 1. MP - since we are trying to fit a much bigger than 1 gpu model\n # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,\n # and we only use deepspeed for training at the moment\n # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first\n # 4. Sharded DDP - same as MP\n self.place_model_on_device = args.place_model_on_device\n if (\n self.is_model_parallel\n or args.deepspeed\n or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)\n or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])\n ):\n self.place_model_on_device = False\n\n default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)\n self.data_collator = data_collator if data_collator is not None else default_collator\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.tokenizer = tokenizer\n\n if self.place_model_on_device:\n self._move_model_to_device(model, args.device)\n\n # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs\n if self.is_model_parallel:\n self.args._n_gpu = 1\n\n # later use `self.model is self.model_wrapped` to check if it's wrapped or not\n self.model_wrapped = model\n self.model = model\n\n self.compute_metrics = compute_metrics\n self.preprocess_logits_for_metrics = preprocess_logits_for_metrics\n self.optimizer, self.lr_scheduler = optimizers\n if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):\n raise RuntimeError(\n \"Passing a `model_init` is incompatible with providing the `optimizers` argument. 
\"\n \"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.\"\n )\n default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)\n callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks\n self.callback_handler = CallbackHandler(\n callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler\n )\n self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)\n\n # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.\n self._loggers_initialized = False\n\n # Create clone of distant repo and output directory if needed\n if self.args.push_to_hub:\n self.init_git_repo(at_init=True)\n # In case of pull, we need to make sure every process has the latest.\n if is_torch_tpu_available():\n xm.rendezvous(\"init git repo\")\n elif args.local_rank != -1:\n dist.barrier()\n\n if self.args.should_save:\n os.makedirs(self.args.output_dir, exist_ok=True)\n\n if not callable(self.data_collator) and callable(getattr(self.data_collator, \"collate_batch\", None)):\n raise ValueError(\"The `data_collator` should be a simple callable (function, class with `__call__`).\")\n\n if args.max_steps > 0:\n logger.info(\"max_steps is given, it will override any value given in num_train_epochs\")\n\n if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:\n raise ValueError(\"train_dataset does not implement __len__, max_steps has to be specified\")\n\n if (\n train_dataset is not None\n and isinstance(train_dataset, torch.utils.data.IterableDataset)\n and args.group_by_length\n ):\n raise ValueError(\"the `--group_by_length` option is only available for `Dataset`, not `IterableDataset\")\n\n self._signature_columns = None\n\n # Mixed precision setup\n self.use_apex = False\n self.use_amp = False\n\n if args.fp16 or args.bf16:\n if args.half_precision_backend == \"auto\":\n if _is_native_amp_available:\n args.half_precision_backend = \"amp\"\n else:\n if args.bf16:\n raise ValueError(\"Tried to use `bf16` but native amp is not available\")\n else:\n args.half_precision_backend = \"apex\"\n logger.info(f\"Using {args.half_precision_backend} half precision backend\")\n\n self.do_grad_scaling = False\n if (args.fp16 or args.bf16) and not args.deepspeed: # deepspeed manages its own half precision\n if args.half_precision_backend == \"amp\":\n self.use_amp = True\n self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16\n self.do_grad_scaling = True\n if is_sagemaker_mp_enabled():\n self.scaler = smp.amp.GradScaler()\n elif self.sharded_ddp is not None:\n self.scaler = ShardedGradScaler()\n elif is_torch_tpu_available():\n from torch_xla.amp import GradScaler\n\n self.scaler = GradScaler()\n else:\n self.scaler = torch.cuda.amp.GradScaler()\n else:\n if not is_apex_available():\n raise ImportError(\n \"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.\"\n )\n self.use_apex = True\n\n # FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.\n if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:\n raise ValueError(\n \"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. 
Pass \"\n \"along 'max_grad_norm': 0 in your hyperparameters.\"\n )\n\n # Label smoothing\n if self.args.label_smoothing_factor != 0:\n self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)\n else:\n self.label_smoother = None\n\n self.state = TrainerState()\n self.control = TrainerControl()\n # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then\n # returned to 0 every time flos need to be logged\n self.current_flos = 0\n self.hp_search_backend = None\n self.use_tune_checkpoints = False\n default_label_names = (\n [\"start_positions\", \"end_positions\"]\n if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()\n else [\"labels\"]\n )\n self.label_names = default_label_names if self.args.label_names is None else self.args.label_names\n self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)\n\n # very last\n self._memory_tracker.stop_and_update_metrics()\n\n def add_callback(self, callback):\n \"\"\"\n Add a callback to the current list of [`~transformer.TrainerCallback`].\n\n Args:\n callback (`type` or [`~transformer.TrainerCallback`]):\n A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the\n first case, will instantiate a member of that class.\n \"\"\"\n self.callback_handler.add_callback(callback)\n\n def pop_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it.\n\n If the callback is not found, returns `None` (and no error is raised).\n\n Args:\n callback (`type` or [`~transformer.TrainerCallback`]):\n A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the\n first case, will pop the first member of that class found in the list of callbacks.\n\n Returns:\n [`~transformer.TrainerCallback`]: The callback removed, if found.\n \"\"\"\n return self.callback_handler.pop_callback(callback)\n\n def remove_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of [`~transformer.TrainerCallback`].\n\n Args:\n callback (`type` or [`~transformer.TrainerCallback`]):\n A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. 
In the\n first case, will remove the first member of that class found in the list of callbacks.\n \"\"\"\n self.callback_handler.remove_callback(callback)\n\n def _move_model_to_device(self, model, device):\n model = model.to(device)\n # Moving a model to an XLA device disconnects the tied weights, so we have to retie them.\n if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, \"tie_weights\"):\n model.tie_weights()\n\n def _remove_unused_columns(self, dataset: \"datasets.Dataset\", description: Optional[str] = None):\n if not self.args.remove_unused_columns:\n return dataset\n if self._signature_columns is None:\n # Inspect model forward signature to keep only the arguments it accepts.\n signature = inspect.signature(self.model.forward)\n self._signature_columns = list(signature.parameters.keys())\n # Labels may be named label or label_ids, the default data collator handles that.\n self._signature_columns += [\"label\", \"label_ids\"]\n\n ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))\n if len(ignored_columns) > 0:\n dset_description = \"\" if description is None else f\"in the {description} set \"\n logger.info(\n f\"The following columns {dset_description} don't have a corresponding argument in \"\n f\"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}.\"\n f\" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, \"\n f\" you can safely ignore this message.\"\n )\n\n columns = [k for k in self._signature_columns if k in dataset.column_names]\n\n if version.parse(datasets.__version__) < version.parse(\"1.4.0\"):\n dataset.set_format(\n type=dataset.format[\"type\"], columns=columns, format_kwargs=dataset.format[\"format_kwargs\"]\n )\n return dataset\n else:\n return dataset.remove_columns(ignored_columns)\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:\n if not has_length(self.train_dataset):\n return None\n\n generator = None\n if self.args.world_size <= 1 and _is_torch_generator_available:\n generator = torch.Generator()\n generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))\n\n # Build the sampler.\n if self.args.group_by_length:\n if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):\n lengths = (\n self.train_dataset[self.args.length_column_name]\n if self.args.length_column_name in self.train_dataset.column_names\n else None\n )\n else:\n lengths = None\n model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None\n if self.args.world_size <= 1:\n return LengthGroupedSampler(\n self.args.train_batch_size * self.args.gradient_accumulation_steps,\n dataset=self.train_dataset,\n lengths=lengths,\n model_input_name=model_input_name,\n generator=generator,\n )\n else:\n return DistributedLengthGroupedSampler(\n self.args.train_batch_size * self.args.gradient_accumulation_steps,\n dataset=self.train_dataset,\n num_replicas=self.args.world_size,\n rank=self.args.process_index,\n lengths=lengths,\n model_input_name=model_input_name,\n seed=self.args.seed,\n )\n\n else:\n if self.args.world_size <= 1:\n if _is_torch_generator_available:\n return RandomSampler(self.train_dataset, generator=generator)\n return RandomSampler(self.train_dataset)\n elif (\n self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]\n and not self.args.dataloader_drop_last\n ):\n # Use a loop for TPUs when drop_last is False to have all batches 
have the same size.\n return DistributedSamplerWithLoop(\n self.train_dataset,\n batch_size=self.args.per_device_train_batch_size,\n num_replicas=self.args.world_size,\n rank=self.args.process_index,\n seed=self.args.seed,\n )\n else:\n return DistributedSampler(\n self.train_dataset,\n num_replicas=self.args.world_size,\n rank=self.args.process_index,\n seed=self.args.seed,\n )\n\n def get_train_dataloader(self) -> DataLoader:\n \"\"\"\n Returns the training [`~torch.utils.data.DataLoader`].\n\n Will use no sampler if `self.train_dataset` does not implement `__len__`, a random sampler (adapted to\n distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n\n train_dataset = self.train_dataset\n if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):\n train_dataset = self._remove_unused_columns(train_dataset, description=\"training\")\n\n if isinstance(train_dataset, torch.utils.data.IterableDataset):\n if self.args.world_size > 1:\n train_dataset = IterableDatasetShard(\n train_dataset,\n batch_size=self.args.train_batch_size,\n drop_last=self.args.dataloader_drop_last,\n num_processes=self.args.world_size,\n process_index=self.args.process_index,\n )\n\n return DataLoader(\n train_dataset,\n batch_size=self.args.per_device_train_batch_size,\n collate_fn=self.data_collator,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n train_sampler = self._get_train_sampler()\n\n return DataLoader(\n train_dataset,\n batch_size=self.args.train_batch_size,\n sampler=train_sampler,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:\n # Deprecated code\n if self.args.use_legacy_prediction_loop:\n if is_torch_tpu_available():\n return SequentialDistributedSampler(\n eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()\n )\n elif is_sagemaker_mp_enabled():\n return SequentialDistributedSampler(\n eval_dataset,\n num_replicas=smp.dp_size(),\n rank=smp.dp_rank(),\n batch_size=self.args.per_device_eval_batch_size,\n )\n elif self.args.local_rank != -1:\n return SequentialDistributedSampler(eval_dataset)\n else:\n return SequentialSampler(eval_dataset)\n\n if self.args.world_size <= 1:\n return SequentialSampler(eval_dataset)\n else:\n return ShardSampler(\n eval_dataset,\n batch_size=self.args.per_device_eval_batch_size,\n num_processes=self.args.world_size,\n process_index=self.args.process_index,\n )\n\n def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:\n \"\"\"\n Returns the evaluation [`~torch.utils.data.DataLoader`].\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (`torch.utils.data.Dataset`, *optional*):\n If provided, will override `self.eval_dataset`. If it is an `datasets.Dataset`, columns not accepted by\n the `model.forward()` method are automatically removed. 
It must implement `__len__`.\n \"\"\"\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n\n if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):\n eval_dataset = self._remove_unused_columns(eval_dataset, description=\"evaluation\")\n\n if isinstance(eval_dataset, torch.utils.data.IterableDataset):\n if self.args.world_size > 1:\n eval_dataset = IterableDatasetShard(\n eval_dataset,\n batch_size=self.args.per_device_eval_batch_size,\n drop_last=self.args.dataloader_drop_last,\n num_processes=self.args.world_size,\n process_index=self.args.process_index,\n )\n return DataLoader(\n eval_dataset,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n eval_sampler = self._get_eval_sampler(eval_dataset)\n\n return DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n \"\"\"\n Returns the test [`~torch.utils.data.DataLoader`].\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n test_dataset (`torch.utils.data.Dataset`, *optional*):\n The test dataset to use. If it is an `datasets.Dataset`, columns not accepted by the `model.forward()`\n method are automatically removed. It must implement `__len__`.\n \"\"\"\n if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):\n test_dataset = self._remove_unused_columns(test_dataset, description=\"test\")\n\n if isinstance(test_dataset, torch.utils.data.IterableDataset):\n if self.args.world_size > 1:\n test_dataset = IterableDatasetShard(\n test_dataset,\n batch_size=self.args.eval_batch_size,\n drop_last=self.args.dataloader_drop_last,\n num_processes=self.args.world_size,\n process_index=self.args.process_index,\n )\n return DataLoader(\n test_dataset,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n num_workers=self.args.dataloader_num_workers,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n test_sampler = self._get_eval_sampler(test_dataset)\n\n # We use the same batch_size as for eval.\n return DataLoader(\n test_dataset,\n sampler=test_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n pin_memory=self.args.dataloader_pin_memory,\n )\n\n def create_optimizer_and_scheduler(self, num_training_steps: int):\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or\n `create_scheduler`) in a subclass.\n \"\"\"\n self.create_optimizer()\n self.create_scheduler(num_training_steps=num_training_steps, optimizer=self.optimizer)\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n \"\"\"\n if self.optimizer is None:\n decay_parameters = get_parameter_names(self.model, [nn.LayerNorm])\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if n not in decay_parameters],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)\n\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n\n if is_sagemaker_mp_enabled():\n self.optimizer = smp.DistributedOptimizer(self.optimizer)\n\n return self.optimizer\n\n @staticmethod\n def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:\n \"\"\"\n Returns the optimizer class and optimizer parameters based on the training arguments.\n\n Args:\n args (`transformers.training_args.TrainingArguments`):\n The training arguments for the training session.\n\n \"\"\"\n optimizer_kwargs = {\"lr\": args.learning_rate}\n adam_kwargs = {\n \"betas\": (args.adam_beta1, args.adam_beta2),\n \"eps\": args.adam_epsilon,\n }\n if args.optim == OptimizerNames.ADAFACTOR:\n optimizer_cls = Adafactor\n optimizer_kwargs.update({\"scale_parameter\": False, \"relative_step\": False})\n elif args.optim == OptimizerNames.ADAMW_HF:\n from .optimization import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n elif args.optim == OptimizerNames.ADAMW_TORCH:\n from torch.optim import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:\n try:\n from torch_xla.amp.syncfree import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\"Trainer failed to import syncfree AdamW from torch_xla.\")\n elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:\n try:\n from apex.optimizers import FusedAdam\n\n optimizer_cls = FusedAdam\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\"Trainer tried to instantiate apex FusedAdam but apex is not installed!\")\n else:\n raise ValueError(f\"Trainer cannot instantiate unsupported optimizer: {args.optim}\")\n return optimizer_cls, optimizer_kwargs\n\n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n \"\"\"\n Setup the scheduler. 
The optimizer of the trainer must have been set up either before this method is called or\n passed as an argument.\n\n Args:\n num_training_steps (int): The number of training steps to do.\n \"\"\"\n if self.lr_scheduler is None:\n self.lr_scheduler = get_scheduler(\n self.args.lr_scheduler_type,\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=self.args.get_warmup_steps(num_training_steps),\n num_training_steps=num_training_steps,\n )\n return self.lr_scheduler\n\n def num_examples(self, dataloader: DataLoader) -> int:\n \"\"\"\n Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset.\n\n Will raise an exception if the underlying dataset does not implement method `__len__`\n \"\"\"\n return len(dataloader.dataset)\n\n def _hp_search_setup(self, trial: Union[\"optuna.Trial\", Dict[str, Any]]):\n \"\"\"HP search setup code\"\"\"\n self._trial = trial\n\n if self.hp_search_backend is None or trial is None:\n return\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n params = self.hp_space(trial)\n elif self.hp_search_backend == HPSearchBackend.RAY:\n params = trial\n params.pop(\"wandb\", None)\n elif self.hp_search_backend == HPSearchBackend.SIGOPT:\n params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}\n elif self.hp_search_backend == HPSearchBackend.WANDB:\n params = trial\n\n for key, value in params.items():\n if not hasattr(self.args, key):\n logger.warning(\n f\"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`.\"\n )\n continue\n old_attr = getattr(self.args, key, None)\n # Casting value to the proper type\n if old_attr is not None:\n value = type(old_attr)(value)\n setattr(self.args, key, value)\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n logger.info(\"Trial:\", trial.params)\n if self.hp_search_backend == HPSearchBackend.SIGOPT:\n logger.info(f\"SigOpt Assignments: {trial.assignments}\")\n if self.hp_search_backend == HPSearchBackend.WANDB:\n logger.info(f\"W&B Sweep parameters: {trial}\")\n if self.args.deepspeed:\n # Rebuild the deepspeed config to reflect the updated training parameters\n from transformers.deepspeed import HfDeepSpeedConfig\n\n self.args.hf_deepspeed_config = HfDeepSpeedConfig(self.args)\n\n def _report_to_hp_search(\n self, trial: Union[\"optuna.Trial\", Dict[str, Any]], epoch: int, metrics: Dict[str, float]\n ):\n if self.hp_search_backend is None or trial is None:\n return\n self.objective = self.compute_objective(metrics.copy())\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n import optuna\n\n trial.report(self.objective, epoch)\n if trial.should_prune():\n raise optuna.TrialPruned()\n elif self.hp_search_backend == HPSearchBackend.RAY:\n from ray import tune\n\n if self.control.should_save:\n self._tune_save_checkpoint()\n tune.report(objective=self.objective, **metrics)\n\n def _tune_save_checkpoint(self):\n from ray import tune\n\n if not self.use_tune_checkpoints:\n return\n with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:\n output_dir = os.path.join(checkpoint_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\")\n self.save_model(output_dir, _internal_call=True)\n if self.args.should_save:\n self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))\n\n 
def call_model_init(self, trial=None):\n model_init_argcount = number_of_arguments(self.model_init)\n if model_init_argcount == 0:\n model = self.model_init()\n elif model_init_argcount == 1:\n model = self.model_init(trial)\n else:\n raise RuntimeError(\"model_init should have 0 or 1 argument.\")\n\n if model is None:\n raise RuntimeError(\"model_init should not return None.\")\n\n return model\n\n def _wrap_model(self, model, training=True):\n if is_sagemaker_mp_enabled():\n # Wrapping the base model twice in a DistributedModel will raise an error.\n if isinstance(self.model_wrapped, smp.model.DistributedModel):\n return self.model_wrapped\n return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)\n\n # already initialized its own DDP and AMP\n if self.deepspeed:\n return self.deepspeed\n\n # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again\n if unwrap_model(model) is not model:\n return model\n\n # Mixed precision training with apex (torch < 1.6)\n if self.use_apex and training:\n model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)\n\n # Multi-gpu training (should be after apex fp16 initialization)\n if self.args.n_gpu > 1:\n model = nn.DataParallel(model)\n\n # Note: in torch.distributed mode, there's no point in wrapping the model\n # inside a DistributedDataParallel as we'll be under `no_grad` anyways.\n if not training:\n return model\n\n # Distributed training (should be after apex fp16 initialization)\n if self.sharded_ddp is not None:\n # Sharded DDP!\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n model = ShardedDDP(model, self.optimizer)\n else:\n mixed_precision = self.args.fp16 or self.args.bf16\n cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp\n zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3\n # XXX: Breaking the self.model convention but I see no way around it for now.\n if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:\n model = auto_wrap(model)\n self.model = model = FullyShardedDDP(\n model,\n mixed_precision=mixed_precision,\n reshard_after_forward=zero_3,\n cpu_offload=cpu_offload,\n ).to(self.args.device)\n\n elif is_sagemaker_dp_enabled():\n model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)\n elif self.args.local_rank != -1:\n kwargs = {}\n if self.args.ddp_find_unused_parameters is not None:\n kwargs[\"find_unused_parameters\"] = self.args.ddp_find_unused_parameters\n elif isinstance(model, PreTrainedModel):\n # find_unused_parameters breaks checkpointing as per\n # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021\n kwargs[\"find_unused_parameters\"] = not model.is_gradient_checkpointing\n else:\n kwargs[\"find_unused_parameters\"] = True\n\n if self.args.ddp_bucket_cap_mb is not None:\n kwargs[\"bucket_cap_mb\"] = self.args.ddp_bucket_cap_mb\n model = nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,\n output_device=self.args.local_rank if self.args._n_gpu != 0 else None,\n **kwargs,\n )\n\n return model\n\n def train(\n self,\n resume_from_checkpoint: Optional[Union[str, bool]] = None,\n trial: Union[\"optuna.Trial\", Dict[str, Any]] = None,\n ignore_keys_for_eval: Optional[List[str]] = None,\n **kwargs,\n ):\n \"\"\"\n Main training entry point.\n\n Args:\n resume_from_checkpoint (`str` or `bool`, *optional*):\n If a `str`, local path to a saved checkpoint as saved by a previous 
instance of [`Trainer`]. If a\n `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance\n of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.\n trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):\n The trial run or the hyperparameter dictionary for hyperparameter search.\n ignore_keys_for_eval (`List[str]`, *optional*)\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions for evaluation during the training.\n kwargs:\n Additional keyword arguments used to hide deprecated arguments\n \"\"\"\n resume_from_checkpoint = None if not resume_from_checkpoint else resume_from_checkpoint\n\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n args = self.args\n\n self.is_in_train = True\n\n # do_train is not a reliable argument, as it might not be set and .train() still called, so\n # the following is a workaround:\n if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:\n self._move_model_to_device(self.model, args.device)\n\n if \"model_path\" in kwargs:\n resume_from_checkpoint = kwargs.pop(\"model_path\")\n warnings.warn(\n \"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` \"\n \"instead.\",\n FutureWarning,\n )\n if len(kwargs) > 0:\n raise TypeError(f\"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.\")\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n model_reloaded = False\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(args.seed)\n self.model = self.call_model_init(trial)\n model_reloaded = True\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Load potential model checkpoint\n if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:\n resume_from_checkpoint = get_last_checkpoint(args.output_dir)\n if resume_from_checkpoint is None:\n raise ValueError(f\"No valid checkpoint found in output directory ({args.output_dir})\")\n\n if resume_from_checkpoint is not None:\n if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):\n raise ValueError(f\"Can't find a valid checkpoint at {resume_from_checkpoint}\")\n\n logger.info(f\"Loading model from {resume_from_checkpoint}).\")\n\n if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):\n config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))\n checkpoint_version = config.transformers_version\n if checkpoint_version is not None and checkpoint_version != __version__:\n logger.warning(\n f\"You are resuming training from a checkpoint trained with {checkpoint_version} of \"\n f\"Transformers but your current version is {__version__}. 
This is not recommended and could \"\n \"yield to errors or unwanted behaviors.\"\n )\n\n if args.deepspeed:\n # will be resumed in deepspeed_init\n pass\n else:\n # We load the model state dict on the CPU to avoid an OOM error.\n state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location=\"cpu\")\n # If the model is on the GPU, it still works!\n self._load_state_dict_in_model(state_dict)\n\n # release memory\n del state_dict\n\n # If model was re-initialized, put it on the right device and update self.model_wrapped\n if model_reloaded:\n if self.place_model_on_device:\n self._move_model_to_device(self.model, args.device)\n self.model_wrapped = self.model\n\n # Keeping track whether we can can len() on the dataset or not\n train_dataset_is_sized = has_length(self.train_dataset)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n # May be slightly incorrect if the last batch in the training datalaoder has a smaller size but it's\n # the best we can do.\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n num_train_samples = len(self.train_dataset) * args.num_train_epochs\n else:\n # see __init__. max_steps is set when the dataset has no __len__\n max_steps = args.max_steps\n # Setting a very large number of epochs so we go as many times as necessary over the iterator.\n num_train_epochs = sys.maxsize\n num_update_steps_per_epoch = max_steps\n num_train_samples = args.max_steps * total_train_batch_size\n\n if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:\n if self.args.n_gpu > 1:\n # nn.DataParallel(model) replicates the model, creating new variables and module\n # references registered here no longer work on other gpus, breaking the module\n raise ValueError(\n \"Currently --debug underflow_overflow is not supported under DP. 
Please use DDP (torch.distributed.launch).\"\n )\n else:\n debug_overflow = DebugUnderflowOverflow(self.model) # noqa\n\n delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE\n if args.deepspeed:\n deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(\n self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n elif not delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Activate gradient checkpointing if needed\n if args.gradient_checkpointing:\n self.model.gradient_checkpointing_enable()\n\n model = self._wrap_model(self.model_wrapped)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n if delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.\n\n # Train!\n num_examples = (\n self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps\n )\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)\n ):\n self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n if trial is not None:\n assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial\n self.state.trial_params = hp_params(assignments)\n else:\n self.state.trial_params = None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n elif isinstance(train_dataloader.dataset, IterableDatasetShard):\n train_dataloader.dataset.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (\n len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps\n )\n self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)\n\n step = -1\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args, self.state, self.control)\n\n if (\n ((step + 1) 
% args.gradient_accumulation_steps != 0)\n and args.local_rank != -1\n and args._no_sync_in_gradient_accumulation\n ):\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss_step = self.training_step(model, inputs)\n else:\n tr_loss_step = self.training_step(model, inputs)\n\n if (\n args.logging_nan_inf_filter\n and not is_torch_tpu_available()\n and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))\n ):\n # if loss is nan or inf simply add the average of previous logged losses\n tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)\n else:\n tr_loss += tr_loss_step\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if self.deepspeed:\n self.deepspeed.step()\n\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.do_grad_scaling:\n # Reduce gradients first for XLA\n if is_torch_tpu_available():\n gradients = xm._fetch_gradients(self.optimizer)\n xm.all_reduce(\"sum\", gradients, scale=1.0 / xm.xrt_world_size())\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(args.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n # Some models (like FullyShardedDDP) have a specific way to do gradient clipping\n model.clip_grad_norm_(args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n nn.utils.clip_grad_norm_(\n amp.master_params(self.optimizer) if self.use_apex else model.parameters(),\n args.max_grad_norm,\n )\n\n # Optimizer step\n optimizer_was_run = True\n if self.deepspeed:\n pass # called outside the loop\n elif is_torch_tpu_available():\n if self.do_grad_scaling:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n xm.optimizer_step(self.optimizer)\n elif self.do_grad_scaling:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n optimizer_was_run = scale_before <= scale_after\n else:\n self.optimizer.step()\n\n if optimizer_was_run and not self.deepspeed:\n self.lr_scheduler.step()\n\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n else:\n self.control = self.callback_handler.on_substep_end(args, self.state, self.control)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n if step < 0:\n logger.warning(\n f\"There seems to be not a single sample in your epoch_iterator, stopping training at step\"\n f\" {self.state.global_step}! 
This is expected if you're using an IterableDataset and set\"\n f\" num_steps ({max_steps}) higher than the number of available samples.\"\n )\n self.control.should_training_stop = True\n\n self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)\n\n if DebugOption.TPU_METRICS_DEBUG in self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n # Wait for everyone to get here so we are sur the model has been saved by process 0.\n if is_torch_tpu_available():\n xm.rendezvous(\"load_best_model_at_end\")\n elif args.local_rank != -1:\n dist.barrier()\n\n logger.info(\n f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\"\n )\n\n best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)\n if os.path.exists(best_model_path):\n if self.deepspeed:\n # temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping\n deepspeed_engine, optimizer, lr_scheduler = deepspeed_reinit(self)\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n self.deepspeed.load_checkpoint(\n self.state.best_model_checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True\n )\n else:\n # We load the model state dict on the CPU to avoid an OOM error.\n state_dict = torch.load(best_model_path, map_location=\"cpu\")\n # If the model is on the GPU, it still works!\n self._load_state_dict_in_model(state_dict)\n else:\n logger.warning(\n f\"Could not locate the best model at {best_model_path}, if you are running a distributed training \"\n \"on multiple nodes, you should activate `--save_on_each_node`.\"\n )\n\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n train_loss = self._total_loss_scalar / self.state.global_step\n\n metrics = speed_metrics(\"train\", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n metrics[\"train_loss\"] = train_loss\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(args, self.state, self.control)\n\n return TrainOutput(self.state.global_step, train_loss, metrics)\n\n def _load_state_dict_in_model(self, state_dict):\n load_result = self.model.load_state_dict(state_dict, strict=False)\n\n if len(load_result.missing_keys) != 0:\n if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(\n self.model._keys_to_ignore_on_save\n ):\n self.model.tie_weights()\n else:\n logger.warning(f\"There were missing keys in the checkpoint model loaded: 
{load_result.missing_keys}.\")\n if len(load_result.unexpected_keys) != 0:\n logger.warning(\n f\"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.\"\n )\n\n def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):\n if self.control.should_log:\n if is_torch_tpu_available():\n xm.mark_step()\n\n logs: Dict[str, float] = {}\n\n # all_gather + mean() to get average loss over all processes\n tr_loss_scalar = self._nested_gather(tr_loss).mean().item()\n\n # reset tr_loss to zero\n tr_loss -= tr_loss\n\n logs[\"loss\"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)\n logs[\"learning_rate\"] = self._get_learning_rate()\n\n self._total_loss_scalar += tr_loss_scalar\n self._globalstep_last_logged = self.state.global_step\n self.store_flos()\n\n self.log(logs)\n\n metrics = None\n if self.control.should_evaluate:\n metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)\n self._report_to_hp_search(trial, epoch, metrics)\n\n if self.control.should_save:\n self._save_checkpoint(model, trial, metrics=metrics)\n self.control = self.callback_handler.on_save(self.args, self.state, self.control)\n\n def _load_rng_state(self, checkpoint):\n # Load RNG states from `checkpoint`\n if checkpoint is None:\n return\n\n local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank\n if local_rank != -1:\n rng_file = os.path.join(checkpoint, f\"rng_state_{local_rank}.pth\")\n if not os.path.isfile(os.path.join(checkpoint, rng_file)):\n logger.info(\n f\"Didn't find an RNG file for process {local_rank}, if you are resuming a training that \"\n \"wasn't launched in a distributed fashion, reproducibility is not guaranteed.\"\n )\n return\n else:\n rng_file = os.path.join(checkpoint, \"rng_state.pth\")\n if not os.path.isfile(rng_file):\n logger.info(\n \"Didn't find an RNG file, if you are resuming a training that was launched in a distributed \"\n \"fashion, reproducibility is not guaranteed.\"\n )\n return\n\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n if torch.cuda.is_available():\n if self.args.local_rank != -1:\n torch.cuda.random.set_rng_state(checkpoint_rng_state[\"cuda\"])\n else:\n try:\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n except Exception as e:\n logger.infor(\n f\"Didn't manage to set back the RNG states of the GPU because of the following error:\\n {e}\"\n \"\\nThis won't yield the same results as if the training had not been interrupted.\"\n )\n if is_torch_tpu_available():\n xm.set_rng_state(checkpoint_rng_state[\"xla\"])\n\n def _save_checkpoint(self, model, trial, metrics=None):\n # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we\n # want to save except FullyShardedDDP.\n # assert unwrap_model(model) is self.model, \"internal model should be a reference to self.model\"\n\n # Save model checkpoint\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n if self.hp_search_backend is not None and trial is not None:\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n run_id = trial.number\n elif self.hp_search_backend == HPSearchBackend.RAY:\n from ray import tune\n\n run_id = tune.get_trial_id()\n elif self.hp_search_backend == HPSearchBackend.SIGOPT:\n run_id = trial.id\n elif 
self.hp_search_backend == HPSearchBackend.WANDB:\n import wandb\n\n run_id = wandb.run.id\n run_name = self.hp_name(trial) if self.hp_name is not None else f\"run-{run_id}\"\n run_dir = os.path.join(self.args.output_dir, run_name)\n else:\n run_dir = self.args.output_dir\n self.store_flos()\n\n output_dir = os.path.join(run_dir, checkpoint_folder)\n self.save_model(output_dir, _internal_call=True)\n if self.deepspeed:\n # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed\n # config `stage3_gather_fp16_weights_on_model_save` is True\n self.deepspeed.save_checkpoint(output_dir)\n\n # Save optimizer and scheduler\n if self.sharded_ddp == ShardedDDPOption.SIMPLE:\n self.optimizer.consolidate_state_dict()\n\n if is_torch_tpu_available():\n xm.rendezvous(\"saving_optimizer_states\")\n xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))\n with warnings.catch_warnings(record=True) as caught_warnings:\n xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))\n reissue_pt_warnings(caught_warnings)\n elif is_sagemaker_mp_enabled():\n if smp.dp_rank() == 0:\n # Consolidate the state dict on all processed of dp_rank 0\n opt_state_dict = self.optimizer.state_dict()\n # Save it and the scheduler on the main process\n if self.args.should_save:\n torch.save(opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME))\n with warnings.catch_warnings(record=True) as caught_warnings:\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))\n reissue_pt_warnings(caught_warnings)\n if self.do_grad_scaling:\n torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))\n elif self.args.should_save and not self.deepspeed:\n # deepspeed.save_checkpoint above saves model/optim/sched\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))\n with warnings.catch_warnings(record=True) as caught_warnings:\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))\n reissue_pt_warnings(caught_warnings)\n if self.do_grad_scaling:\n torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))\n\n # Determine the new best metric / best model checkpoint\n if metrics is not None and self.args.metric_for_best_model is not None:\n metric_to_check = self.args.metric_for_best_model\n if not metric_to_check.startswith(\"eval_\"):\n metric_to_check = f\"eval_{metric_to_check}\"\n metric_value = metrics[metric_to_check]\n\n operator = np.greater if self.args.greater_is_better else np.less\n if (\n self.state.best_metric is None\n or self.state.best_model_checkpoint is None\n or operator(metric_value, self.state.best_metric)\n ):\n self.state.best_metric = metric_value\n self.state.best_model_checkpoint = output_dir\n\n # Save the Trainer state\n if self.args.should_save:\n self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))\n\n # Save RNG state in non-distributed training\n rng_states = {\n \"python\": random.getstate(),\n \"numpy\": np.random.get_state(),\n \"cpu\": torch.random.get_rng_state(),\n }\n if torch.cuda.is_available():\n if self.args.local_rank == -1:\n # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)\n rng_states[\"cuda\"] = torch.cuda.random.get_rng_state_all()\n else:\n rng_states[\"cuda\"] = torch.cuda.random.get_rng_state()\n\n if is_torch_tpu_available():\n rng_states[\"xla\"] = xm.get_rng_state()\n\n # A process can arrive here before the process 0 has a chance 
to save the model, in which case output_dir may\n # not yet exist.\n os.makedirs(output_dir, exist_ok=True)\n local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank\n if local_rank == -1:\n torch.save(rng_states, os.path.join(output_dir, \"rng_state.pth\"))\n else:\n torch.save(rng_states, os.path.join(output_dir, f\"rng_state_{local_rank}.pth\"))\n\n if self.args.push_to_hub:\n self._push_from_checkpoint(output_dir)\n\n # Maybe delete some older checkpoints.\n if self.args.should_save:\n self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)\n\n def _load_optimizer_and_scheduler(self, checkpoint):\n \"\"\"If optimizer and scheduler states exist, load them.\"\"\"\n if checkpoint is None:\n return\n\n if self.deepspeed:\n # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init\n return\n\n if os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) and os.path.isfile(\n os.path.join(checkpoint, SCHEDULER_NAME)\n ):\n # Load in optimizer and scheduler states\n if is_torch_tpu_available():\n # On TPU we have to take some extra precautions to properly load the states on the right device.\n optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=\"cpu\")\n with warnings.catch_warnings(record=True) as caught_warnings:\n lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location=\"cpu\")\n reissue_pt_warnings(caught_warnings)\n\n xm.send_cpu_data_to_device(optimizer_state, self.args.device)\n xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)\n\n self.optimizer.load_state_dict(optimizer_state)\n self.lr_scheduler.load_state_dict(lr_scheduler_state)\n else:\n map_location = \"cpu\" if is_sagemaker_mp_enabled() else self.args.device\n self.optimizer.load_state_dict(\n torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)\n )\n with warnings.catch_warnings(record=True) as caught_warnings:\n self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))\n reissue_pt_warnings(caught_warnings)\n if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):\n self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))\n\n def hyperparameter_search(\n self,\n hp_space: Optional[Callable[[\"optuna.Trial\"], Dict[str, float]]] = None,\n compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,\n n_trials: int = 20,\n direction: str = \"minimize\",\n backend: Optional[Union[\"str\", HPSearchBackend]] = None,\n hp_name: Optional[Callable[[\"optuna.Trial\"], str]] = None,\n **kwargs,\n ) -> BestRun:\n \"\"\"\n Launch an hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined\n by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,\n the sum of all metrics otherwise.\n\n <Tip warning={true}>\n\n To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to\n reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to\n subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom\n optimizer/scheduler.\n\n </Tip>\n\n Args:\n hp_space (`Callable[[\"optuna.Trial\"], Dict[str, float]]`, *optional*):\n A function that defines the hyperparameter search space. 
Will default to\n [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or\n [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.\n compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):\n A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`\n method. Will default to [`~trainer_utils.default_compute_objective`].\n n_trials (`int`, *optional*, defaults to 100):\n The number of trial runs to test.\n direction(`str`, *optional*, defaults to `\"minimize\"`):\n Whether to optimize greater or lower objects. Can be `\"minimize\"` or `\"maximize\"`, you should pick\n `\"minimize\"` when optimizing the validation loss, `\"maximize\"` when optimizing one or several metrics.\n backend(`str` or [`~training_utils.HPSearchBackend`], *optional*):\n The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending\n on which one is installed. If all are installed, will default to optuna.\n kwargs:\n Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more\n information see:\n\n - the documentation of\n [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)\n - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)\n - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)\n\n Returns:\n [`trainer_utils.BestRun`]: All the information about the best run.\n \"\"\"\n if backend is None:\n backend = default_hp_search_backend()\n if backend is None:\n raise RuntimeError(\n \"At least one of optuna or ray should be installed. \"\n \"To install optuna run `pip install optuna`. \"\n \"To install ray run `pip install ray[tune]`. \"\n \"To install sigopt run `pip install sigopt`.\"\n )\n backend = HPSearchBackend(backend)\n if backend == HPSearchBackend.OPTUNA and not is_optuna_available():\n raise RuntimeError(\"You picked the optuna backend, but it is not installed. Use `pip install optuna`.\")\n if backend == HPSearchBackend.RAY and not is_ray_tune_available():\n raise RuntimeError(\n \"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`.\"\n )\n if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():\n raise RuntimeError(\"You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.\")\n if backend == HPSearchBackend.WANDB and not is_wandb_available():\n raise RuntimeError(\"You picked the wandb backend, but it is not installed. 
Use `pip install wandb`.\")\n self.hp_search_backend = backend\n if self.model_init is None:\n raise RuntimeError(\n \"To use hyperparameter search, you need to pass your model through a model_init function.\"\n )\n\n self.hp_space = default_hp_space[backend] if hp_space is None else hp_space\n self.hp_name = hp_name\n self.compute_objective = default_compute_objective if compute_objective is None else compute_objective\n\n backend_dict = {\n HPSearchBackend.OPTUNA: run_hp_search_optuna,\n HPSearchBackend.RAY: run_hp_search_ray,\n HPSearchBackend.SIGOPT: run_hp_search_sigopt,\n HPSearchBackend.WANDB: run_hp_search_wandb,\n }\n best_run = backend_dict[backend](self, n_trials, direction, **kwargs)\n\n self.hp_search_backend = None\n return best_run\n\n def log(self, logs: Dict[str, float]) -> None:\n \"\"\"\n Log `logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (`Dict[str, float]`):\n The values to log.\n \"\"\"\n if self.state.epoch is not None:\n logs[\"epoch\"] = round(self.state.epoch, 2)\n\n output = {**logs, **{\"step\": self.state.global_step}}\n self.state.log_history.append(output)\n self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)\n\n def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:\n \"\"\"\n Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.\n \"\"\"\n if isinstance(data, Mapping):\n return type(data)({k: self._prepare_input(v) for k, v in data.items()})\n elif isinstance(data, (tuple, list)):\n return type(data)(self._prepare_input(v) for v in data)\n elif isinstance(data, torch.Tensor):\n kwargs = dict(device=self.args.device)\n if self.deepspeed and data.dtype != torch.int64:\n # NLP models inputs are int64 and those get adjusted to the right dtype of the\n # embedding. Other models such as wav2vec2's inputs are already float and thus\n # may need special handling to match the dtypes of the model\n kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))\n return data.to(**kwargs)\n return data\n\n def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:\n \"\"\"\n Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and\n handling potential state.\n \"\"\"\n inputs = self._prepare_input(inputs)\n if self.args.past_index >= 0 and self._past is not None:\n inputs[\"mems\"] = self._past\n\n return inputs\n\n def autocast_smart_context_manager(self):\n \"\"\"\n A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired\n arguments, depending on the situation.\n \"\"\"\n if self.use_amp:\n if version.parse(torch.__version__) >= version.parse(\"1.10\"):\n ctx_manager = autocast(dtype=self.amp_dtype)\n else:\n ctx_manager = autocast()\n else:\n ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress()\n\n return ctx_manager\n\n def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to train.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. 
Most models expect the targets under the\n argument `labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n `torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if is_sagemaker_mp_enabled():\n scaler = self.scaler if self.do_grad_scaling else None\n loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)\n return loss_mb.reduce_mean().detach().to(self.args.device)\n\n with self.autocast_smart_context_manager():\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:\n # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.do_grad_scaling:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n # loss gets scaled under gradient_accumulation_steps in deepspeed\n loss = self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()\n\n def compute_loss(self, model, inputs, return_outputs=False):\n \"\"\"\n How the loss is computed by Trainer. By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n \"\"\"\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n # Save past state if it exists\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n loss = self.label_smoother(outputs, labels)\n else:\n # We don't use .loss here since the model may return tuples instead of ModelOutput.\n loss = outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n return (loss, outputs) if return_outputs else loss\n\n def is_local_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several\n machines) main process.\n \"\"\"\n return self.args.local_process_index == 0\n\n def is_world_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the global main process (when training in a distributed fashion on several\n machines, this is only going to be `True` for one process).\n \"\"\"\n # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global\n # process index.\n if is_sagemaker_mp_enabled():\n return smp.rank() == 0\n else:\n return self.args.process_index == 0\n\n def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):\n \"\"\"\n Will save the model, so you can reload it using `from_pretrained()`.\n\n Will only save from the main process.\n \"\"\"\n\n if output_dir is None:\n output_dir = self.args.output_dir\n\n if is_torch_tpu_available():\n self._save_tpu(output_dir)\n elif is_sagemaker_mp_enabled():\n # Calling the state_dict needs to be done on the wrapped model and on all processes.\n state_dict = self.model_wrapped.state_dict()\n if self.args.should_save:\n self._save(output_dir, state_dict=state_dict)\n elif (\n ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp\n ):\n state_dict = 
self.model.state_dict()\n\n if self.args.should_save:\n self._save(output_dir, state_dict=state_dict)\n elif self.deepspeed:\n\n # this takes care of everything as long as we aren't under zero3\n if self.args.should_save:\n self._save(output_dir)\n\n if is_deepspeed_zero3_enabled():\n # It's too complicated to try to override different places where the weights dump gets\n # saved, so since under zero3 the file is bogus, simply delete it. The user should\n # either user deepspeed checkpoint to resume or to recover full weights use\n # zero_to_fp32.py stored in the checkpoint.\n if self.args.should_save:\n file = os.path.join(output_dir, WEIGHTS_NAME)\n if os.path.isfile(file):\n # logger.info(f\"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights\")\n os.remove(file)\n\n # now save the real model if stage3_gather_fp16_weights_on_model_save=True\n # if false it will not be saved.\n # This must be called on all ranks\n if not self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME):\n logger.warning(\n \"deepspeed.save_fp16_model didn't save the model, since stage3_gather_fp16_weights_on_model_save=false. \"\n \"Saving the full checkpoint instead, use zero_to_fp32.py to recover weights\"\n )\n self.deepspeed.save_checkpoint(output_dir)\n\n elif self.args.should_save:\n self._save(output_dir)\n\n # Push to the Hub when `save_model` is called by the user.\n if self.args.push_to_hub and not _internal_call:\n self.push_to_hub(commit_message=\"Model save\")\n\n def _save_tpu(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n logger.info(f\"Saving model checkpoint to {output_dir}\")\n\n if xm.is_master_ordinal():\n os.makedirs(output_dir, exist_ok=True)\n torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))\n\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n xm.rendezvous(\"saving_checkpoint\")\n if not isinstance(self.model, PreTrainedModel):\n if isinstance(unwrap_model(self.model), PreTrainedModel):\n unwrap_model(self.model).save_pretrained(\n output_dir,\n save_config=self.args.should_save,\n state_dict=self.model.state_dict(),\n save_function=xm.save,\n )\n else:\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir, save_config=self.args.should_save, save_function=xm.save)\n if self.tokenizer is not None and self.args.should_save:\n self.tokenizer.save_pretrained(output_dir)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n # If we are executing this function, we are the process zero, so we don't check for that.\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving model checkpoint to {output_dir}\")\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n if isinstance(unwrap_model(self.model), PreTrainedModel):\n if state_dict is None:\n state_dict = self.model.state_dict()\n unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)\n else:\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n if state_dict is None:\n state_dict = 
self.model.state_dict()\n torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir, state_dict=state_dict)\n if self.tokenizer is not None:\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))\n\n def store_flos(self):\n # Storing the number of floating-point operations that went into the model\n if self.args.local_rank != -1:\n self.state.total_flos += (\n distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()\n )\n self.current_flos = 0\n else:\n self.state.total_flos += self.current_flos\n self.current_flos = 0\n\n def _sorted_checkpoints(\n self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False\n ) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = [str(x) for x in Path(output_dir).glob(f\"{checkpoint_prefix}-*\")]\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(f\".*{checkpoint_prefix}-([0-9]+)\", path)\n if regex_match is not None and regex_match.groups() is not None:\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n # Make sure we don't delete the best model.\n if self.state.best_model_checkpoint is not None:\n best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))\n for i in range(best_model_index, len(checkpoints_sorted) - 2):\n checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]\n return checkpoints_sorted\n\n def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which\n # we don't do to allow resuming.\n save_total_limit = self.args.save_total_limit\n if (\n self.state.best_model_checkpoint is not None\n and self.args.save_total_limit == 1\n and checkpoints_sorted[-1] != self.state.best_model_checkpoint\n ):\n save_total_limit = 2\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init `compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (`Dataset`, *optional*):\n Pass a dataset if you wish to override `self.eval_dataset`. 
If it is an `datasets.Dataset`, columns not\n accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`\n method.\n ignore_keys (`Lst[str]`, *optional*):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (`str`, *optional*, defaults to `\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n start_time = time.time()\n\n eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if self.compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n\n total_batch_size = self.args.eval_batch_size * self.args.world_size\n output.metrics.update(\n speed_metrics(\n metric_key_prefix,\n start_time,\n num_samples=output.num_samples,\n num_steps=math.ceil(output.num_samples / total_batch_size),\n )\n )\n\n self.log(output.metrics)\n\n if DebugOption.TPU_METRICS_DEBUG in self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)\n\n self._memory_tracker.stop_and_update_metrics(output.metrics)\n\n return output.metrics\n\n def predict(\n self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = \"test\"\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and returns predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in `evaluate()`.\n\n Args:\n test_dataset (`Dataset`):\n Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the\n `model.forward()` method are automatically removed. Has to implement the method `__len__`\n ignore_keys (`Lst[str]`, *optional*):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (`str`, *optional*, defaults to `\"test\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"test_bleu\" if the prefix is \"test\" (default)\n\n <Tip>\n\n If your predictions or labels have different sequence length (for instance because you're doing dynamic padding\n in a token classification task) the predictions will be padded (on the right) to allow for concatenation into\n one array. 
The padding index is -100.\n\n </Tip>\n\n Returns: *NamedTuple* A namedtuple with the following keys:\n\n - predictions (`np.ndarray`): The predictions on `test_dataset`.\n - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).\n - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained\n labels).\n \"\"\"\n # memory metrics - must set up as early as possible\n self._memory_tracker.start()\n\n test_dataloader = self.get_test_dataloader(test_dataset)\n start_time = time.time()\n\n eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n output = eval_loop(\n test_dataloader, description=\"Prediction\", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix\n )\n total_batch_size = self.args.eval_batch_size * self.args.world_size\n output.metrics.update(\n speed_metrics(\n metric_key_prefix,\n start_time,\n num_samples=output.num_samples,\n num_steps=math.ceil(output.num_samples / total_batch_size),\n )\n )\n\n self._memory_tracker.stop_and_update_metrics(output.metrics)\n\n return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)\n\n def evaluation_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> EvalLoopOutput:\n \"\"\"\n Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n args = self.args\n\n prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only\n\n # if eval is called w/o train init deepspeed here\n if args.deepspeed and not self.deepspeed:\n\n # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval\n # from the checkpoint eventually\n deepspeed_engine, _, _ = deepspeed_init(\n self, num_training_steps=0, resume_from_checkpoint=None, inference=True\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n\n model = self._wrap_model(self.model, training=False)\n\n # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called\n # while ``train`` is running, cast it to the right dtype first and then put on device\n if not self.is_in_train:\n if args.fp16_full_eval:\n model = model.to(dtype=torch.float16, device=args.device)\n elif args.bf16_full_eval:\n model = model.to(dtype=torch.bfloat16, device=args.device)\n\n batch_size = dataloader.batch_size\n\n logger.info(f\"***** Running {description} *****\")\n if has_length(dataloader.dataset):\n logger.info(f\" Num examples = {self.num_examples(dataloader)}\")\n else:\n logger.info(\" Num examples: Unknown\")\n logger.info(f\" Batch size = {batch_size}\")\n\n model.eval()\n\n self.callback_handler.eval_dataloader = dataloader\n # Do this before wrapping.\n eval_dataset = dataloader.dataset\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)\n\n if args.past_index >= 0:\n self._past = None\n\n # Initialize containers\n # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)\n losses_host = None\n preds_host = None\n labels_host = None\n # losses/preds/labels on CPU (final containers)\n all_losses = None\n all_preds = None\n all_labels = None\n # Will be useful when we have an 
iterable dataset so don't know its length.\n\n observed_num_examples = 0\n # Main evaluation loop\n for step, inputs in enumerate(dataloader):\n # Update the observed num examples\n observed_batch_size = find_batch_size(inputs)\n if observed_batch_size is not None:\n observed_num_examples += observed_batch_size\n # For batch samplers, batch_size is not known by the dataloader in advance.\n if batch_size is None:\n batch_size = observed_batch_size\n\n # Prediction step\n loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)\n\n if is_torch_tpu_available():\n xm.mark_step()\n\n # Update containers on host\n if loss is not None:\n losses = self._nested_gather(loss.repeat(batch_size))\n losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)\n if labels is not None:\n labels = self._pad_across_processes(labels)\n labels = self._nested_gather(labels)\n labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)\n if logits is not None:\n logits = self._pad_across_processes(logits)\n logits = self._nested_gather(logits)\n if self.preprocess_logits_for_metrics is not None:\n logits = self.preprocess_logits_for_metrics(logits, labels)\n preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = (\n labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)\n )\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, labels_host = None, None, None\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)\n\n # Number of samples\n if has_length(eval_dataset):\n num_samples = len(eval_dataset)\n # The instance check is weird and does not actually check for the type, but whether the dataset has the right\n # methods. 
Therefore we need to make sure it also has the attribute.\n elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, \"num_examples\"):\n num_samples = eval_dataset.num_examples\n else:\n num_samples = observed_num_examples\n\n # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of\n # samplers has been rounded to a multiple of batch_size, so we truncate.\n if all_losses is not None:\n all_losses = all_losses[:num_samples]\n if all_preds is not None:\n all_preds = nested_truncate(all_preds, num_samples)\n if all_labels is not None:\n all_labels = nested_truncate(all_labels, num_samples)\n\n # Metrics!\n if self.compute_metrics is not None and all_preds is not None and all_labels is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))\n else:\n metrics = {}\n\n # To be JSON-serializable, we need to remove numpy types or zero-d tensors\n metrics = denumpify_detensorize(metrics)\n\n if all_losses is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = all_losses.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)\n\n def _nested_gather(self, tensors, name=None):\n \"\"\"\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n \"\"\"\n if tensors is None:\n return\n if is_torch_tpu_available():\n if name is None:\n name = \"nested_gather\"\n tensors = nested_xla_mesh_reduce(tensors, name)\n elif is_sagemaker_mp_enabled():\n tensors = smp_gather(tensors)\n elif self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n return tensors\n\n # Copied from Accelerate.\n def _pad_across_processes(self, tensor, pad_index=-100):\n \"\"\"\n Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so\n they can safely be gathered.\n \"\"\"\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(\n f\"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\"\n )\n\n if len(tensor.shape) < 2:\n return tensor\n # Gather all sizes\n size = torch.tensor(tensor.shape, device=tensor.device)[None]\n sizes = self._nested_gather(size).cpu()\n\n max_size = max(s[1] for s in sizes)\n if tensor.shape[1] == max_size:\n return tensor\n\n # Then pad to the maximum size\n old_size = tensor.shape\n new_size = list(old_size)\n new_size[1] = max_size\n new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index\n new_tensor[:, : old_size[1]] = tensor\n return new_tensor\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to 
evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n ignore_keys (`Lst[str]`, *optional*):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n\n Return:\n Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,\n logits and labels (each being optional).\n \"\"\"\n has_labels = all(inputs.get(k) is not None for k in self.label_names)\n inputs = self._prepare_inputs(inputs)\n if ignore_keys is None:\n if hasattr(self.model, \"config\"):\n ignore_keys = getattr(self.model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n # labels may be popped when computing the loss (label smoothing for instance) so we grab them first.\n if has_labels:\n labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))\n if len(labels) == 1:\n labels = labels[0]\n else:\n labels = None\n\n with torch.no_grad():\n if is_sagemaker_mp_enabled():\n raw_outputs = smp_forward_only(model, inputs)\n if has_labels:\n if isinstance(raw_outputs, dict):\n loss_mb = raw_outputs[\"loss\"]\n logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n loss_mb = raw_outputs[0]\n logits_mb = raw_outputs[1:]\n\n loss = loss_mb.reduce_mean().detach().cpu()\n logits = smp_nested_concat(logits_mb)\n else:\n loss = None\n if isinstance(raw_outputs, dict):\n logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)\n else:\n logits_mb = raw_outputs\n logits = smp_nested_concat(logits_mb)\n else:\n if has_labels:\n with self.autocast_smart_context_manager():\n loss, outputs = self.compute_loss(model, inputs, return_outputs=True)\n loss = loss.mean().detach()\n\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n logits = outputs[1:]\n else:\n loss = None\n with self.autocast_smart_context_manager():\n outputs = model(**inputs)\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)\n else:\n logits = outputs\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index - 1]\n\n if prediction_loss_only:\n return (loss, None, None)\n\n logits = nested_detach(logits)\n if len(logits) == 1:\n logits = logits[0]\n\n return (loss, logits, labels)\n\n def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):\n \"\"\"\n For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point\n operations for every backward + forward pass. 
If using another model, either implement such a method in the\n model or subclass and override this method.\n\n Args:\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n Returns:\n `int`: The number of floating-point operations.\n \"\"\"\n if hasattr(self.model, \"floating_point_ops\"):\n return self.model.floating_point_ops(inputs)\n else:\n return 0\n\n def init_git_repo(self, at_init: bool = False):\n \"\"\"\n Initializes a git repo in `self.args.hub_model_id`.\n\n Args:\n at_init (`bool`, *optional*, defaults to `False`):\n Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is\n `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped\n out.\n \"\"\"\n if not self.is_world_process_zero():\n return\n use_auth_token = True if self.args.hub_token is None else self.args.hub_token\n if self.args.hub_model_id is None:\n repo_name = Path(self.args.output_dir).absolute().name\n else:\n repo_name = self.args.hub_model_id\n if \"/\" not in repo_name:\n repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)\n\n try:\n self.repo = Repository(\n self.args.output_dir,\n clone_from=repo_name,\n use_auth_token=use_auth_token,\n )\n except EnvironmentError:\n if self.args.overwrite_output_dir and at_init:\n # Try again after wiping output_dir\n shutil.rmtree(self.args.output_dir)\n self.repo = Repository(\n self.args.output_dir,\n clone_from=repo_name,\n use_auth_token=use_auth_token,\n )\n else:\n raise\n\n self.repo.git_pull()\n\n # By default, ignore the checkpoint folders\n if (\n not os.path.exists(os.path.join(self.args.output_dir, \".gitignore\"))\n and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS\n ):\n with open(os.path.join(self.args.output_dir, \".gitignore\"), \"w\", encoding=\"utf-8\") as writer:\n writer.writelines([\"checkpoint-*/\"])\n\n self.push_in_progress = None\n\n def create_model_card(\n self,\n language: Optional[str] = None,\n license: Optional[str] = None,\n tags: Optional[str] = None,\n model_name: Optional[str] = None,\n finetuned_from: Optional[str] = None,\n tasks: Optional[str] = None,\n dataset_tags: Optional[Union[str, List[str]]] = None,\n dataset: Optional[Union[str, List[str]]] = None,\n dataset_args: Optional[Union[str, List[str]]] = None,\n ):\n if not self.is_world_process_zero():\n return\n\n training_summary = TrainingSummary.from_trainer(\n self,\n language=language,\n license=license,\n tags=tags,\n model_name=model_name,\n finetuned_from=finetuned_from,\n tasks=tasks,\n dataset_tags=dataset_tags,\n dataset=dataset,\n dataset_args=dataset_args,\n )\n model_card = training_summary.to_model_card()\n with open(os.path.join(self.args.output_dir, \"README.md\"), \"w\") as f:\n f.write(model_card)\n\n def _push_from_checkpoint(self, checkpoint_folder):\n # Only push from one node.\n if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:\n return\n # If we haven't finished the last push, we don't do this one.\n if self.push_in_progress is not None and not self.push_in_progress.is_done:\n return\n\n output_dir = self.args.output_dir\n # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder\n modeling_files = [CONFIG_NAME, WEIGHTS_NAME]\n for modeling_file in modeling_files:\n if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):\n shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, 
modeling_file))\n # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.\n if self.tokenizer is not None:\n self.tokenizer.save_pretrained(output_dir)\n # Same for the training arguments\n torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))\n\n try:\n if self.args.hub_strategy == HubStrategy.CHECKPOINT:\n # Temporarily move the checkpoint just saved for the push\n tmp_checkpoint = os.path.join(output_dir, \"last-checkpoint\")\n # We have to remove the \"last-checkpoint\" dir if it exists, otherwise the checkpoint is moved as a\n # subfolder.\n if os.path.isdir(tmp_checkpoint):\n shutil.rmtree(tmp_checkpoint)\n shutil.move(checkpoint_folder, tmp_checkpoint)\n\n if self.args.save_strategy == IntervalStrategy.STEPS:\n commit_message = f\"Training in progress, step {self.state.global_step}\"\n else:\n commit_message = f\"Training in progress, epoch {int(self.state.epoch)}\"\n _, self.push_in_progress = self.repo.push_to_hub(\n commit_message=commit_message, blocking=False, auto_lfs_prune=True\n )\n finally:\n if self.args.hub_strategy == HubStrategy.CHECKPOINT:\n # Move back the checkpoint to its place\n shutil.move(tmp_checkpoint, checkpoint_folder)\n\n def push_to_hub(self, commit_message: Optional[str] = \"End of training\", blocking: bool = True, **kwargs) -> str:\n \"\"\"\n Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*.\n\n Parameters:\n commit_message (`str`, *optional*, defaults to `\"End of training\"`):\n Message to commit while pushing.\n blocking (`bool`, *optional*, defaults to `True`):\n Whether the function should return only when the `git push` has finished.\n kwargs:\n Additional keyword arguments passed along to [`~Trainer.create_model_card`].\n\n Returns:\n The url of the commit of your model in the given repository if `blocking=False`, a tuple with the url of\n the commit and an object to track the progress of the commit if `blocking=True`\n \"\"\"\n # If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but\n # it might fail.\n if not hasattr(self, \"repo\"):\n self.init_git_repo()\n\n if self.args.should_save:\n if self.args.hub_model_id is None:\n model_name = Path(self.args.output_dir).name\n else:\n model_name = self.args.hub_model_id.split(\"/\")[-1]\n\n # Needs to be executed on all processes for TPU training, but will only save on the processed determined by\n # self.args.should_save.\n self.save_model(_internal_call=True)\n\n # Only push from one node.\n if not self.is_world_process_zero():\n return\n\n git_head_commit_url = self.repo.push_to_hub(\n commit_message=commit_message, blocking=blocking, auto_lfs_prune=True\n )\n # push separately the model card to be independant from the rest of the model\n if self.args.should_save:\n self.create_model_card(model_name=model_name, **kwargs)\n try:\n self.repo.push_to_hub(\n commit_message=\"update model card README.md\", blocking=blocking, auto_lfs_prune=True\n )\n except EnvironmentError as exc:\n logger.error(f\"Error pushing update to the model card. 
Please read logs and retry.\\n${exc}\")\n\n return git_head_commit_url\n\n #\n # Deprecated code\n #\n\n def prediction_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n args = self.args\n\n if not has_length(dataloader.dataset):\n raise ValueError(\"dataset must implement __len__\")\n prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only\n\n # if eval is called w/o train init deepspeed here\n if args.deepspeed and not self.deepspeed:\n\n # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval\n # from the checkpoint eventually\n deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since\n # for example the Z3-optimizer is a must for zero3 to work even for inference - what we\n # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer\n deepspeed_engine.optimizer.optimizer = None\n deepspeed_engine.lr_scheduler = None\n\n model = self._wrap_model(self.model, training=False)\n\n # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called\n # while ``train`` is running, cast it to the right dtype first and then put on device\n if not self.is_in_train:\n if args.fp16_full_eval:\n model = model.to(dtype=torch.float16, device=args.device)\n elif args.bf16_full_eval:\n model = model.to(dtype=torch.bfloat16, device=args.device)\n\n batch_size = dataloader.batch_size\n num_examples = self.num_examples(dataloader)\n logger.info(f\"***** Running {description} *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Batch size = {batch_size}\")\n losses_host: torch.Tensor = None\n preds_host: Union[torch.Tensor, List[torch.Tensor]] = None\n labels_host: Union[torch.Tensor, List[torch.Tensor]] = None\n\n world_size = max(1, args.world_size)\n\n eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)\n if not prediction_loss_only:\n # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass\n # a batch size to the sampler)\n make_multiple_of = None\n if hasattr(dataloader, \"sampler\") and isinstance(dataloader.sampler, SequentialDistributedSampler):\n make_multiple_of = dataloader.sampler.batch_size\n preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)\n labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)\n\n model.eval()\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)\n\n if args.past_index >= 0:\n self._past = None\n\n self.callback_handler.eval_dataloader = dataloader\n\n for step, inputs in enumerate(dataloader):\n loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)\n if loss is not None:\n losses = loss.repeat(batch_size)\n losses_host = losses if losses_host is None else 
torch.cat((losses_host, losses), dim=0)\n if logits is not None:\n preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)\n if labels is not None:\n labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, labels_host = None, None, None\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n eval_loss = eval_losses_gatherer.finalize()\n preds = preds_gatherer.finalize() if not prediction_loss_only else None\n label_ids = labels_gatherer.finalize() if not prediction_loss_only else None\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n # To be JSON-serializable, we need to remove numpy types or zero-d tensors\n metrics = denumpify_detensorize(metrics)\n\n if eval_loss is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = eval_loss.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def _gather_and_numpify(self, tensors, name):\n \"\"\"\n Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before\n concatenating them to `gathered`\n \"\"\"\n if tensors is None:\n return\n if is_torch_tpu_available():\n tensors = nested_xla_mesh_reduce(tensors, name)\n elif is_sagemaker_mp_enabled():\n tensors = smp_gather(tensors)\n elif self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n\n return nested_numpify(tensors)\n"
] |
[
[
"torch.load",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.cuda.random.get_rng_state_all",
"torch.cuda.amp.autocast",
"numpy.concatenate",
"torch.no_grad",
"torch.cuda.is_available",
"torch.Generator",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.barrier",
"torch.tensor",
"numpy.random.set_state",
"torch.distributed.get_local_rank",
"torch.isinf",
"torch.empty",
"torch.cuda.random.get_rng_state",
"torch.cuda.amp.GradScaler",
"torch.cuda.random.set_rng_state",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.get_state",
"torch.cuda.random.set_rng_state_all",
"torch.random.set_rng_state",
"torch.isnan",
"torch.random.get_rng_state",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] |
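The `prediction_loop` in the record above accumulates per-batch losses by concatenating onto a "host" tensor and only moves the result off the device when gathering. A minimal standalone sketch of that accumulation pattern (plain PyTorch; the toy batch list is invented for illustration and this is not the `transformers.Trainer` API):

import torch

# Toy per-batch loss tensors standing in for prediction_step outputs.
batches = [torch.randn(4) for _ in range(3)]

losses_host = None
for losses in batches:
    # Same pattern as the record: seed with the first batch, then concatenate.
    losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)

all_losses = losses_host.cpu().numpy()  # convert once at the end, not per batch
print(all_losses.shape)  # (12,)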
MiroK/fenics_ii
|
[
"58c41f0e8dba720962830395851e081b057269cc",
"58c41f0e8dba720962830395851e081b057269cc"
] |
[
"xii/assembler/average_shape.py",
"test/test_embedding.py"
] |
[
"from abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nfrom itertools import product\nfrom math import pi, sqrt\nimport numpy as np\n\n# import quadpy\n\nfrom numpy.polynomial.legendre import leggauss\nimport dolfin as df\n\nfrom xii.linalg.matrix_utils import is_number\nfrom xii.assembler.average_form import average_space\nfrom xii.meshing.make_mesh_cpp import make_mesh\n\nQuadrature = namedtuple('quadrature', ('points', 'weights'))\n\n\nclass BoundingSurface(metaclass=ABCMeta):\n '''Shape used for reducing a 3d function to 1d by carrying out integration'''\n \n @abstractmethod\n def quadrature(self, x0, n):\n '''Quadrature weights and points for reduction'''\n pass\n\n \nclass Square(BoundingSurface):\n '''\n Square in plane(x0, n) with ll corner given by P(x\\in R^3) -> R^3\n '''\n def __init__(self, P, degree):\n if isinstance(P, (tuple, list, np.ndarray)):\n assert all(is_number(Pi) for Pi in P)\n self.P = lambda x0, p=P: p\n else:\n self.P = P\n\n # Weights for [-1, 1] for 2d will do the tensor product\n self.xq, self.wq = leggauss(degree)\n\n @staticmethod\n def map_from_reference(x0, n, P):\n '''A map of [-1, 1]x[-1, 1] to square(x0, n, P).'''\n n = n / np.linalg.norm(n)\n # C B\n # x0\n # P A\n vec = P - x0\n # We are in the place\n assert abs(np.dot(vec, n)) < 1E-13\n \n A = x0 + vec*np.cos(np.pi/2) + np.cross(n, vec)*np.sin(np.pi/2) + n*(n.dot(vec))*(1-np.cos(np.pi/2))\n C = x0 + vec*np.cos(3*np.pi/2) + np.cross(n, vec)*np.sin(3*np.pi/2) + n*(n.dot(vec))*(1-np.cos(3*np.pi/2))\n\n def mapping(x, P=P, u=A-P, v=C-P):\n x, y = x\n assert abs(x) < 1 + 1E-13 and abs(y) < 1 + 1E-13\n return P + 0.5*u*(1+x) + 0.5*v*(1+y) \n \n return mapping\n\n def quadrature(self, x0, n):\n '''Gaussian qaudrature over the surface of the square'''\n xq, wq = self.xq, self.wq\n \n sq = Square.map_from_reference(x0, n, self.P(x0))\n # 1D\n A, B = sq(np.array([-1, -1])), sq(np.array([-1, 1]))\n size = 0.5*np.linalg.norm(B-A)\n # Scale the weights\n wq = wq*size\n\n # 2D\n wq = list(map(np.prod, product(wq, wq)))\n \n xq = list(map(np.array, product(xq, xq)))\n Txq = list(map(sq, xq))\n \n return Quadrature(Txq, wq)\n\n\nclass SquareRim(BoundingSurface):\n '''\n Boundary of a square in plane(x0, n) with ll corner given by \n P(x\\in R^3) -> R^3\n '''\n def __init__(self, P, degree):\n if isinstance(P, (tuple, list, np.ndarray)):\n assert all(is_number(Pi) for Pi in P)\n self.P = lambda x0, p=P: p\n else:\n self.P = P\n\n # Weights for [-1, 1] for 2d will do the tensor product\n self.xq, self.wq = leggauss(degree)\n\n @staticmethod\n def map_from_reference(x0, n, P):\n '''\n A map of [-1, 1] to 4 points on the boundary of the square(x0, n, P)\n '''\n # Rectangle boundary mapping\n n = n / np.linalg.norm(n)\n # C B\n # x0\n # P A\n vec = P - x0\n # We are in the place\n assert abs(np.dot(vec, n)) < 1E-13\n \n pts = [P,\n x0 + vec*np.cos(np.pi/2) + np.cross(n, vec)*np.sin(np.pi/2) + n*(n.dot(vec))*(1-np.cos(np.pi/2)),\n x0 - (P-x0),\n x0 + vec*np.cos(3*np.pi/2) + np.cross(n, vec)*np.sin(3*np.pi/2) + n*(n.dot(vec))*(1-np.cos(3*np.pi/2))]\n\n def mapping(x, pts=pts):\n assert abs(x) < 1 + 1E-13\n return [0.5*P*(1-x) + 0.5*Q*(1+x) for P, Q in zip(pts, pts[1:]+[pts[0]])]\n \n return mapping\n\n def quadrature(self, x0, n):\n '''Gaussian qaudrature over boundary of the square'''\n xq, wq = self.xq, self.wq\n\n sq_bdry = SquareRim.map_from_reference(x0, n, self.P(x0))\n corners = sq_bdry(-1)\n A, B = corners[:2]\n size = 0.5*np.linalg.norm(B-A)\n # Scale the weights\n wq = wq*size\n # One for 
each side\n wq = np.repeat(wq, 4)\n\n Txq = sum(list(map(sq_bdry, xq)), [])\n\n return Quadrature(Txq, wq)\n\n\nclass Circle(BoundingSurface):\n '''Circle in plane(x0, n) with radius given by radius(x0)'''\n def __init__(self, radius, degree):\n # Make constant function\n if is_number(radius):\n assert radius > 0\n self.radius = lambda x0, r=radius: r\n # Then this must map points on centerline to radius\n else:\n self.radius = radius\n\n # Will use Gauss quadrature on [-1, 1]\n self.xq, self.wq = leggauss(degree)\n\n @staticmethod\n def map_from_reference(x0, n, R):\n '''\n Map unit circle in z = 0 to plane to circle of radius R with center at x0.\n '''\n n = n / np.linalg.norm(n)\n def transform(x, x0=x0, n=n, R=R):\n norm = np.dot(x, x)\n # Check assumptions\n assert abs(norm - 1) < 1E-13 and abs(x[2]) < 1E-13\n \n y = x - n*np.dot(x, n)\n y = y / np.sqrt(norm - np.dot(x, n)**2)\n return x0 + R*y\n\n return transform\n\n def quadrature(self, x0, n):\n '''Gauss quadratature over the boundary of the circle'''\n xq, wq = self.xq, self.wq\n xq = np.c_[np.cos(np.pi*xq), np.sin(np.pi*xq), np.zeros_like(xq)]\n\n R = self.radius(x0)\n # Circle viewed from reference\n Txq = list(map(Circle.map_from_reference(x0, n, R), xq))\n # Scaled weights (R is jac of T, pi is from theta=pi*(-1, 1)\n wq = wq*R*np.pi\n\n return Quadrature(Txq, wq)\n\n\nclass Disk(BoundingSurface):\n '''Disk in plane(x0, n) with radius given by radius(x0)''' \n def __init__(self, radius, degree):\n # Make constant function\n if is_number(radius):\n assert radius > 0\n self.radius = lambda x0, r=radius: r\n # Then this must map points on centerline to radius\n else:\n self.radius = radius\n\n # Will use quadrature from quadpy over unit disk in z=0 plane\n # and center (0, 0, 0)\n quad = quadpy.disk.Lether(degree)\n self.xq, self.wq = quad.points, quad.weights\n\n @staticmethod\n def map_from_reference(x0, n, R):\n '''\n Map unit disk in z = 0 to plane to disk of radius R with center at x0.\n '''\n n = n / np.linalg.norm(n)\n def transform(x, x0=x0, n=n, R=R):\n norm = np.dot(x, x)\n # Check assumptions\n assert norm < 1 + 1E-13 and abs(x[2]) < 1E-13\n \n y = x - n*np.dot(x, n)\n y = y / np.sqrt(norm - np.dot(x, n)**2)\n return x0 + R*np.sqrt(norm)*y\n\n return transform\n\n def quadrature(self, x0, n):\n '''Quadrature for disk(center x0, normal n, radius x0)'''\n xq, wq = self.xq, self.wq\n \n xq = np.c_[xq, np.zeros_like(wq)]\n\n R = self.radius(x0)\n # Circle viewed from reference\n Txq = list(map(Disk.map_from_reference(x0, n, R), xq))\n # Scaled weights (R is jac of T, pi is from theta=pi*(-1, 1)\n wq = wq*R**2\n\n return Quadrature(Txq, wq)\n\n \n# Testing utils\ndef render_avg_surface(Pi):\n '''Plot the averaging surface via looking at the quadrature points used'''\n V = Pi.function_space()\n\n line_mesh = Pi.average_['mesh']\n shape = Pi.average_['shape']\n \n # Where the average will be represented\n Pi_V = average_space(V, line_mesh)\n\n # We produce a curve of quardrature points for each dof\n surface = []\n \n dm = Pi_V.dofmap()\n dofs_x = np.array(Pi_V.tabulate_dof_coordinates()).reshape((Pi_V.dim(), -1))\n for cell in df.cells(line_mesh):\n v0, v1 = np.array(cell.get_vertex_coordinates()).reshape((2, 3))\n n = v1 - v0\n\n for dof_x in dofs_x[dm.cell_dofs(cell.index())]:\n x = np.row_stack(shape.quadrature(dof_x, n).points)\n surface.append(x)\n\n return surface\n \n# --------------------------------------------------------------------\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import Axes3D\n import 
matplotlib.pyplot as plt\n\n from xii.assembler.average_form import Average\n from xii.meshing.embedded_mesh import EmbeddedMesh\n import dolfin as df\n\n \n def is_close(a, b, tol=1E-8): return np.linalg.norm(a - b) < tol\n\n def shape_integrate(f, shape, x0, n):\n pts, weights = shape.quadrature(x0, n)\n l = sum(weights)\n return sum(wq*f(*xq) for xq, wq in zip(pts, weights))/l\n\n # Get the MEAN\n mesh = df.BoxMesh(df.Point(-1, -1, -1), df.Point(1, 1, 1), 16, 16, 16)\n # Make 1d\n f = df.MeshFunction('size_t', mesh, 1, 0)\n # df.CompiledSubDomain('near(x[0], x[1]) && near(x[1], x[2])').mark(f, 1)\n df.CompiledSubDomain('near(x[0], 0.) && near(x[1], 0.)').mark(f, 1)\n \n line_mesh = EmbeddedMesh(f, 1)\n\n # Circle ---------------------------------------------------------\n size = 0.125\n ci = Circle(radius=lambda x0: size, degree=12)\n\n u = df.Function(df.FunctionSpace(mesh, 'CG', 1))\n op = Average(u, line_mesh, ci)\n \n surface = render_avg_surface(op)\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n x0 = np.array([0, 0, 0.5])\n n = np.array([0, 0, 1])\n ci_integrate = lambda f, shape=ci, n=n, x0=x0: shape_integrate(f, shape, x0, n)\n \n # Sanity\n f = lambda x, y, z: 1\n value = ci_integrate(f)\n assert is_close(value, 1)\n\n # Odd foo over sym interval\n f = lambda x, y, z: x - y - 0.5\n value = ci_integrate(f)\n assert is_close(value, -0.5)\n\n # Odd foo over sym interval\n f = lambda x, y, z: x**3 - y - z\n value = ci_integrate(f)\n assert is_close(value, -0.5)\n\n # Something that is constant on the dist\n dist = lambda x, y, z: np.dot(np.array([x, y, z])-x0, np.array([x, y, z])-x0)\n assert is_close(ci_integrate(dist), 2*np.pi*size*size**2/(2*pi*size))\n\n # Zero by orthogonality\n null = lambda x, y, z: np.dot(np.array([x, y, z])-x0, n)\n assert is_close(ci_integrate(null), 0.)\n\n f = lambda x, y, z: x**2 + y**2 - z**2\n value = ci_integrate(f)\n assert is_close(value, (size**2 - 0.5**2))\n\n for plane in surface:\n ax.plot3D(plane[:, 0], plane[:, 1], plane[:, 2], marker='o', linestyle='none')\n\n # Square ---------------------------------------------------------\n size = 0.125\n sq = SquareRim(P=lambda x0: x0 - np.array([size, size, 0]), degree=8)\n\n u = df.Function(df.FunctionSpace(mesh, 'CG', 1))\n op = Average(u, line_mesh, sq)\n \n surface = render_avg_surface(op)\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n for plane in surface:\n ax.plot3D(plane[:, 0], plane[:, 1], plane[:, 2], marker='o', linestyle='none')\n\n sq_integrate = lambda f, shape=sq, n=n, x0=x0: shape_integrate(f, shape, x0, n)\n # Sanity\n f = lambda x, y, z: 1\n value = sq_integrate(f)\n assert is_close(value, 1)\n\n # Odd foo over sym interval\n f = lambda x, y, z: x - y - 0.5\n value = sq_integrate(f)\n assert is_close(value, -0.5), (value, )\n\n # Odd foo over sym interval\n f = lambda x, y, z: x**3 - y - z\n value = sq_integrate(f)\n assert is_close(value, -0.5)\n\n # Zero by orthogonality\n null = lambda x, y, z: np.dot(np.array([x, y, z])-x0, n)\n assert is_close(sq_integrate(null), 0.)\n\n W = np.linalg.norm(np.array([size, size, 0]))\n # Something harder\n dist = lambda x, y, z: np.dot(np.array([x, y, z]) - x0, np.array([x, y, z])-x0)\n assert is_close(sq_integrate(dist), 4*8*(np.sqrt(2)*W/2)**3/3.)\n\n # Disk ---------------------------------------------------------\n R = 0.125\n di = Disk(radius=lambda x0: R, degree=12)\n\n u = df.Function(df.FunctionSpace(mesh, 'CG', 1))\n op = Average(u, line_mesh, di)\n \n surface = render_avg_surface(op)\n 
\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n for plane in surface:\n ax.plot3D(plane[:, 0], plane[:, 1], plane[:, 2], marker='o', linestyle='none')\n\n di_integrate = lambda f, shape=di, n=n, x0=x0: shape_integrate(f, shape, x0, n)\n # Sanity\n f = lambda x, y, z: 1\n value = sq_integrate(f)\n assert is_close(value, 1)\n\n # Zero by orthogonality\n null = lambda x, y, z: np.dot(np.array([x, y, z])-x0, n)\n assert is_close(di_integrate(null), 0)\n\n # Something harder\n dist = lambda x, y, z: np.dot(np.array([x, y, z])-x0, np.array([x, y, z])-x0)\n assert is_close(di_integrate(dist), np.pi/2*R**4/(np.pi*R**2))\n \n dist = lambda x, y, z: np.dot(np.array([x, y, z])-x0, np.array([x, y, z])-x0)**2\n assert is_close(di_integrate(dist), np.pi/3*R**6/(np.pi*R**2))\n\n # Square ---------------------------------------------------------\n size = 0.125\n sq = Square(P=lambda x0: x0 - np.array([size, size, 0]), degree=8)\n\n u = df.Function(df.FunctionSpace(mesh, 'CG', 1))\n op = Average(u, line_mesh, sq)\n\n\n from scipy.spatial import Delaunay\n from dolfin import File\n \n surface = render_avg_surface(op)\n\n nodes = np.row_stack(surface)\n tri = Delaunay(nodes)\n\n cells = np.fromiter(tri.simplices.flatten(), dtype='uintp').reshape(tri.simplices.shape)\n \n bounded_volume = make_mesh(nodes, cells, tdim=2, gdim=3)\n File('foo.pvd') << bounded_volume\n \n # for points in surface\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n for plane in surface:\n ax.plot3D(plane[:, 0], plane[:, 1], plane[:, 2], marker='o', linestyle='none')\n \n sq_integrate = lambda f, shape=sq, n=n, x0=x0: shape_integrate(f, shape, x0, n)\n\n # Sanity\n one = lambda x, y, z: 1\n assert is_close(sq_integrate(one), 1)\n\n # Zero by orthogonality\n null = lambda x, y, z: np.dot(np.array([x, y, z])-x0, n)\n assert is_close(sq_integrate(null), 0)\n\n W = np.linalg.norm([size, size, 0])\n # Something harder\n area = 2*W**2\n dist = lambda x, y, z: np.dot(np.array([x, y, z])-x0, np.array([x, y, z])-x0)\n assert is_close(sq_integrate(dist), 8*(np.sqrt(2)*W/2)**4/3./area)\n\n plt.show()\n",
"from xii import EmbeddedMesh, ii_assemble, Trace, OuterNormal\nfrom dolfin import *\nimport numpy as np\n\n\ndef test_2d(n=32, tol=1E-10):\n '''[|]'''\n mesh = UnitSquareMesh(n, n)\n cell_f = MeshFunction('size_t', mesh, 2, 2)\n CompiledSubDomain('x[0] < 0.5+DOLFIN_EPS').mark(cell_f, 1)\n\n left = EmbeddedMesh(cell_f, 1)\n right = EmbeddedMesh(cell_f, 2)\n\n facet_f = MeshFunction('size_t', left, 1, 0)\n CompiledSubDomain('near(x[0], 0.5)').mark(facet_f, 1)\n\n iface = EmbeddedMesh(facet_f, 1)\n\n # Now suppose\n facet_f = MeshFunction('size_t', right, 1, 0)\n CompiledSubDomain('near(x[0], 0.5)').mark(facet_f, 2)\n\n # We want to embed\n mappings = iface.compute_embedding(facet_f, 2)\n # Is it correct?\n\n xi, x = iface.coordinates(), right.coordinates()\n assert min(np.linalg.norm(xi[list(mappings[0].keys())]-x[list(mappings[0].values())], 2, 1)) < tol\n\n tdim = right.topology().dim()-1\n right.init(tdim, 0)\n f2v = right.topology()(tdim, 0)\n\n icells = iface.cells()\n\n vertex_match = lambda xs, ys: all(min(np.linalg.norm(ys - x, 2, 1)) < tol for x in xs)\n \n assert all([vertex_match(xi[icells[key]], x[f2v(val)]) for key, val in list(mappings[tdim].items())])\n\n try:\n iface.compute_embedding(facet_f, 2)\n except ValueError:\n pass\n\n V = FunctionSpace(right, 'CG', 1)\n u = interpolate(Expression('x[1]', degree=1), V)\n dx_ = Measure('dx', domain=iface)\n assert abs(ii_assemble(Trace(u, iface)*dx_) - 0.5) < tol\n\n \ndef test_2d_enclosed(n=32, tol=1E-10):\n '''\n |----|\n | [] |\n |----|\n '''\n mesh = UnitSquareMesh(n, n)\n # Lets get the outer part\n cell_f = MeshFunction('size_t', mesh, 2, 1)\n inside = '(x[0] > 0.25-DOLFIN_EPS) && (x[0] < 0.75+DOLFIN_EPS) && (x[1] > 0.25-DOLFIN_EPS) && (x[1] < 0.75+DOLFIN_EPS)'\n CompiledSubDomain(inside).mark(cell_f, 2)\n\n # Stokes ---\n mesh1 = EmbeddedMesh(cell_f, 1)\n\n bdries1 = MeshFunction('size_t', mesh1, mesh1.topology().dim()-1, 0)\n CompiledSubDomain('near(x[0], 0)').mark(bdries1, 10)\n CompiledSubDomain('near(x[0], 1)').mark(bdries1, 20)\n CompiledSubDomain('near(x[1], 0)').mark(bdries1, 30)\n CompiledSubDomain('near(x[1], 1)').mark(bdries1, 40)\n\n CompiledSubDomain('near(x[0], 0.25) && ((x[1] > 0.25-DOLFIN_EPS) && (x[1] < 0.75+DOLFIN_EPS))').mark(bdries1, 1)\n CompiledSubDomain('near(x[0], 0.75) && ((x[1] > 0.25-DOLFIN_EPS) && (x[1] < 0.75+DOLFIN_EPS))').mark(bdries1, 2)\n CompiledSubDomain('near(x[1], 0.25) && ((x[0] > 0.25-DOLFIN_EPS) && (x[0] < 0.75+DOLFIN_EPS))').mark(bdries1, 3)\n CompiledSubDomain('near(x[1], 0.75) && ((x[0] > 0.25-DOLFIN_EPS) && (x[0] < 0.75+DOLFIN_EPS))').mark(bdries1, 4)\n\n # Darcy ---\n mesh2 = EmbeddedMesh(cell_f, 2)\n bdries2 = MeshFunction('size_t', mesh2, mesh2.topology().dim()-1, 0)\n\n CompiledSubDomain('near(x[0], 0.25) && ((x[1] > 0.25-DOLFIN_EPS) && (x[1] < 0.75+DOLFIN_EPS))').mark(bdries2, 1)\n CompiledSubDomain('near(x[0], 0.75) && ((x[1] > 0.25-DOLFIN_EPS) && (x[1] < 0.75+DOLFIN_EPS))').mark(bdries2, 2)\n CompiledSubDomain('near(x[1], 0.25) && ((x[0] > 0.25-DOLFIN_EPS) && (x[0] < 0.75+DOLFIN_EPS))').mark(bdries2, 3)\n CompiledSubDomain('near(x[1], 0.75) && ((x[0] > 0.25-DOLFIN_EPS) && (x[0] < 0.75+DOLFIN_EPS))').mark(bdries2, 4)\n\n # -----------------\n\n # And interface\n bmesh = EmbeddedMesh(bdries2, (1, 2, 3, 4))\n # Embedded it viewwed from stokes\n mappings = bmesh.compute_embedding(bdries1, (1, 2, 3, 4))\n\n xi, x = bmesh.coordinates(), mesh1.coordinates()\n assert min(np.linalg.norm(xi[list(mappings[0].keys())]-x[list(mappings[0].values())], 2, 1)) < tol\n\n tdim = 
mesh1.topology().dim()-1\n mesh1.init(tdim, 0)\n f2v = mesh1.topology()(tdim, 0)\n\n icells = bmesh.cells()\n\n vertex_match = lambda xs, ys: all(min(np.linalg.norm(ys - x, 2, 1)) < tol for x in xs)\n \n assert all([vertex_match(xi[icells[key]], x[f2v(val)]) for key, val in list(mappings[tdim].items())])\n\n try:\n bmesh.compute_embedding(bdries1, 2)\n except ValueError:\n pass\n\n V = VectorFunctionSpace(mesh1, 'CG', 1)\n u = interpolate(Expression(('x[1]', 'x[0]'), degree=1), V)\n \n dx_ = Measure('dx', domain=bmesh)\n n_ = OuterNormal(bmesh, [0.5, 0.5])\n # Because it is divergence free\n assert abs(ii_assemble(dot(n_, Trace(u, bmesh))*dx_)) < tol\n\n\ndef test_2d_incomplete(n=32, tol=1E-10):\n '''[|]'''\n mesh = UnitSquareMesh(n, n)\n cell_f = MeshFunction('size_t', mesh, 2, 2)\n CompiledSubDomain('x[0] < 0.5+DOLFIN_EPS').mark(cell_f, 1)\n\n left = EmbeddedMesh(cell_f, 1)\n right = EmbeddedMesh(cell_f, 2)\n\n facet_f = MeshFunction('size_t', left, 1, 0)\n CompiledSubDomain('near(x[0], 0.0)').mark(facet_f, 1)\n CompiledSubDomain('near(x[0], 0.5)').mark(facet_f, 2)\n CompiledSubDomain('near(x[1], 0.0)').mark(facet_f, 3)\n CompiledSubDomain('near(x[1], 1.0)').mark(facet_f, 4)\n # More complicated iface\n iface = EmbeddedMesh(facet_f, (1, 2, 3, 4))\n\n # Right will interact with it in a simpler way\n facet_f = MeshFunction('size_t', right, 1, 0)\n CompiledSubDomain('near(x[0], 0.5)').mark(facet_f, 2)\n\n # We want to embed\n mappings = iface.compute_embedding(facet_f, 2)\n # Is it correct?\n\n xi, x = iface.coordinates(), right.coordinates()\n assert min(np.linalg.norm(xi[list(mappings[0].keys())]-x[list(mappings[0].values())], 2, 1)) < tol\n\n tdim = right.topology().dim()-1\n right.init(tdim, 0)\n f2v = right.topology()(tdim, 0)\n\n icells = iface.cells()\n\n vertex_match = lambda xs, ys: all(min(np.linalg.norm(ys - x, 2, 1)) < tol for x in xs)\n \n assert all([vertex_match(xi[icells[key]], x[f2v(val)]) for key, val in list(mappings[tdim].items())])\n\n try:\n iface.compute_embedding(facet_f, 2)\n except ValueError:\n pass\n\n V = FunctionSpace(right, 'CG', 2)\n u = interpolate(Expression('x[1]*x[1]', degree=1), V)\n # FIXME: this passes but looks weird\n dx_ = Measure('dx', domain=iface, subdomain_data=iface.marking_function)\n assert abs(ii_assemble(Trace(u, iface, tag=2)*dx_(2)) - 1./3) < tol\n\n\ndef test_3d(n=4, tol=1E-10):\n '''[|]'''\n mesh = UnitCubeMesh(n, n, n)\n cell_f = MeshFunction('size_t', mesh, 3, 2)\n CompiledSubDomain('x[0] < 0.5+DOLFIN_EPS').mark(cell_f, 1)\n\n left = EmbeddedMesh(cell_f, 1)\n right = EmbeddedMesh(cell_f, 2)\n\n facet_f = MeshFunction('size_t', left, 2, 0)\n CompiledSubDomain('near(x[0], 0.5)').mark(facet_f, 1)\n\n iface = EmbeddedMesh(facet_f, 1)\n\n # Now suppose\n facet_f = MeshFunction('size_t', right, 2, 0)\n CompiledSubDomain('near(x[0], 0.5)').mark(facet_f, 2)\n\n # We want to embed\n mappings = iface.compute_embedding(facet_f, 2)\n # Is it correct?\n\n xi, x = iface.coordinates(), right.coordinates()\n assert min(np.linalg.norm(xi[list(mappings[0].keys())]-x[list(mappings[0].values())], 2, 1)) < tol\n\n tdim = right.topology().dim()-1\n right.init(tdim, 0)\n f2v = right.topology()(tdim, 0)\n\n icells = iface.cells()\n\n vertex_match = lambda xs, ys: all(min(np.linalg.norm(ys - x, 2, 1)) < tol for x in xs)\n \n assert all([vertex_match(xi[icells[key]], x[f2v(val)]) for key, val in list(mappings[tdim].items())])\n\n try:\n iface.compute_embedding(facet_f, 2)\n except ValueError:\n pass\n\n V = FunctionSpace(right, 'CG', 1)\n u = 
interpolate(Expression('x[0] + x[1]', degree=1), V)\n dx_ = Measure('dx', domain=iface)\n assert abs(ii_assemble(Trace(u, iface)*dx_) - 1.0) < tol\n\n# --------------------------------------------------------------------\n\nif __name__ == '__main__':\n test_2d(64)\n test_2d_incomplete(64)\n test_2d_enclosed(64) \n\n test_3d(4)\n"
] |
[
[
"numpy.polynomial.legendre.leggauss",
"numpy.dot",
"numpy.sqrt",
"scipy.spatial.Delaunay",
"numpy.linalg.norm",
"numpy.cos",
"numpy.sin",
"numpy.row_stack",
"numpy.zeros_like",
"numpy.cross",
"numpy.repeat",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.linalg.norm"
]
] |
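The `Circle.quadrature` method in the `average_shape.py` record maps Gauss-Legendre nodes on [-1, 1] to angles on a circle and scales the weights by the Jacobian R*pi, so that a weighted sum divided by the total weight gives the average over the circle. A self-contained numpy sketch of that idea, assuming a circle of radius R centred at the origin in the z = 0 plane (the radius and test functions below are illustrative, not values taken from the repo):

import numpy as np
from numpy.polynomial.legendre import leggauss

R = 0.125
xq, wq = leggauss(12)                      # nodes/weights on [-1, 1]
theta = np.pi * xq                         # map to angle in (-pi, pi)
points = np.c_[R * np.cos(theta), R * np.sin(theta), np.zeros_like(theta)]
weights = wq * R * np.pi                   # Jacobian: R from the radius, pi from theta = pi*x

def average(f):
    vals = np.array([f(*p) for p in points])
    return np.sum(weights * vals) / np.sum(weights)

print(average(lambda x, y, z: 1.0))          # ~1.0 (constant averages to itself)
print(average(lambda x, y, z: x))            # ~0.0 (odd function over the circle)
print(average(lambda x, y, z: x**2 + y**2))  # ~R**2 (constant on the circle)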
saber805/Monkey-recognition
|
[
"432c8f9b599f0159a473033e5bdb55398a86484e"
] |
[
"vgg16.py"
] |
[
"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom PIL import Image\r\nimport scipy.io as scio\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.optim import SGD, Adam\r\nimport hiddenlayer as hl\r\nimport torch.utils.data as Data\r\nfrom torchvision import models\r\nimport torch.nn.functional as F\r\nfrom torchvision import transforms\r\nfrom torchvision.datasets import ImageFolder\r\n\r\n\r\n# vgg16 = models.vgg16(pretrained=True)\r\n# vgg = vgg16.features #vgg仅有卷积和池化\r\n#\r\n# for param in vgg.parameters():\r\n# param.requires_grad_(False)\r\n#\r\n# class MyVggModel(nn.Module):\r\n# def __init__(self):\r\n# super(MyVggModel, self).__init__()\r\n#\r\n# self.vgg = vgg\r\n#\r\n# self.classifier = nn.Sequential(\r\n# nn.Linear(25088, 512),\r\n# nn.Tanh(),\r\n# nn.Dropout(p=0.3),\r\n#\r\n# nn.Linear(512, 256),\r\n# nn.Tanh(),\r\n# nn.Dropout(p=0.3),\r\n#\r\n#\r\n# nn.Linear(256, 10),\r\n# nn.Softmax(dim=1)\r\n# )\r\n#\r\n# def forward(self, x):\r\n# x = self.vgg(x)\r\n# x = x.view(x.size(0), -1)\r\n# output = self.classifier(x)\r\n# return output\r\n#\r\n# def initialize(self):\r\n# for m in self.modules():\r\n# if isinstance(m, nn.Linear):\r\n# tanh_gain = nn.init.calculate_gain('tanh')\r\n#\r\n# nn.init.xavier_uniform_(m.weight.data, gain=tanh_gain)\r\n#\r\n# # nn.init.kaiming_normal_(m.weight.data)\r\n#\r\n#\r\n# Myvggc = MyVggModel()\r\n# #print(Myvggc)\r\n# Myvggc.initialize()\r\n\r\n\r\ntrain_data_transforms = transforms.Compose([\r\n transforms.RandomResizedCrop(224), # 224\r\n transforms.RandomHorizontalFlip(), # 默认概率0.5\r\n transforms.ToTensor(),\r\n # 图像标准化处理\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n])\r\n\r\n\r\nval_data_transforms = transforms.Compose([\r\n transforms.Resize(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n #图像标准化处理\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n])\r\n\r\ntrain_data_dir = r\"G:\\data\\monkey\\training\"\r\ntrain_data = ImageFolder(train_data_dir, transform=train_data_transforms)\r\n#ImageFolder假设所有的文件按文件夹保存好,每个文件夹下面存贮同一类别的图片,文件夹的名字为分类的名字\r\n\r\ntrain_data_loader = Data.DataLoader(\r\n train_data,\r\n batch_size=32,\r\n shuffle=True,\r\n num_workers=4,\r\n)\r\n\r\nval_data_dir = r'G:\\data\\monkey\\validation'\r\nval_data = ImageFolder(val_data_dir, transform=val_data_transforms)\r\nprint(val_data)\r\nval_data_loader = Data.DataLoader(val_data, batch_size=32, shuffle=True, num_workers=4)\r\n\r\nprint(\"训练集样本:\", len(train_data.targets))\r\nprint(\"测试集样本:\", len(val_data.targets))\r\n\r\n\r\n'''#获得一个batch的图像\r\nfor step, (b_x,b_y) in enumerate(train_data_loader):\r\n if step > 0:\r\n break\r\n\r\nmean = np.array([0.485, 0.456, 0.406])\r\nstd = np.array([0.229, 0.224, 0.225])\r\nplt.figure(figsize=(12, 6))\r\nfor ii in np.arange(len(b_y)):\r\n image = b_x[ii,:,:,:].numpy().transpose((1,2,0))\r\n image = std * image + mean\r\n image = np.clip(image, 0, 1)\r\n plt.imshow(image)\r\n plt.title(b_y[ii].data.numpy())\r\n plt.axis(\"off\")\r\n plt.show()\r\nplt.subplots_adjust(hspace=0.3)\r\n\r\n'''\r\n\r\n# optimizer = torch.optim.SGD(Myvggc.parameters(), lr=0.003)\r\n# loss_func = nn.CrossEntropyLoss() #交叉熵损失函数,有点难,\r\n#\r\n#\r\n# train_loss = []\r\n# train_c = []\r\n#\r\n# val_loss = []\r\n# val_c = []\r\n# for epoch in range(80):\r\n# train_loss_epoch = 0\r\n# val_loss_epoch = 0\r\n# train_corrects = 0\r\n# val_correct = 
0\r\n# Myvggc.train()\r\n# for step, (b_x,b_y) in enumerate(train_data_loader):\r\n#\r\n# # print('training in program:', (len(b_x)*(step+1)) / (len(train_data.targets)))\r\n# output = Myvggc(b_x)\r\n# # print('output:',output)\r\n# loss = loss_func(output, b_y)\r\n# # print('loss:',loss.item())\r\n# pre_lab = torch.argmax(output, 1)\r\n# print('b_y:',b_y)\r\n# print('pre_:',pre_lab)\r\n#\r\n#\r\n# optimizer.zero_grad()\r\n# loss.backward()\r\n# optimizer.step()\r\n#\r\n# train_loss_epoch += loss.item() * b_x.size(0)\r\n# train_corrects += torch.sum(pre_lab == b_y)\r\n#\r\n#\r\n# print('**'*10,'已完成', epoch+1,'轮')\r\n#\r\n# train_loss.append(train_loss_epoch / len(train_data.targets) )\r\n# train_c.append(train_corrects.double() / len(train_data.targets))\r\n# print('train_loss:', train_loss)\r\n# print('train_accurary:', train_c)\r\n#\r\n# Myvggc.eval()\r\n#\r\n# for step, (val_x, val_y) in enumerate(val_data_loader):\r\n# output = Myvggc(val_x)\r\n# loss = loss_func(output, val_y)\r\n# pre_lab = torch.argmax(output, 1)\r\n# val_loss_epoch += loss.item() * val_x.size(0)\r\n# val_correct += torch.sum(pre_lab == val_y)\r\n#\r\n#\r\n#\r\n# val_loss.append(val_loss_epoch/ len(val_data.targets))\r\n# val_c.append(val_correct / len(val_data.targets))\r\n# print(\"val_loss:\", val_loss)\r\n# print(\"val_accurary:\", val_c.item())\r\n#\r\n#\r\n# torch.save(Myvggc, 'vgg16_monky.pkl', _use_new_zipfile_serialization=False)\r\n#\r\n# scio.savemat('val_loss.mat', {'val_loss':val_loss})\r\n# scio.savemat('val_c.mat', {'val_c':val_c})\r\n# scio.savemat('train_loss.mat', {'train_loss':train_loss})\r\n# scio.savemat('train_c.mat', {'train_c':train_c})"
] |
[
[
"torch.utils.data.DataLoader"
]
] |
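The `vgg16.py` record builds its loaders from `ImageFolder` plus a `transforms.Compose` pipeline with ImageNet normalization. A small sketch of just the preprocessing step, using a randomly generated PIL image in place of a file on disk (the image size is arbitrary):

import numpy as np
from PIL import Image
from torchvision import transforms

# Same validation-time preprocessing as the record (ImageNet mean/std).
pipeline = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

img = Image.fromarray((np.random.rand(300, 400, 3) * 255).astype(np.uint8))
x = pipeline(img)
print(x.shape)          # torch.Size([3, 224, 224])
batch = x.unsqueeze(0)  # what a DataLoader with batch_size=1 would yield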
mktsasaki/Gasyori100knock
|
[
"fcaf0170035f3cef7d25baa534d69e44165ab7a3"
] |
[
"Question_21_30/answers/answer_29.py"
] |
[
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Affine\ndef affine(img, a, b, c, d, tx, ty):\n\tH, W, C = img.shape\n\n\t# temporary image\n\t# 外周を(0, 0, 0)で埋めた画像からサンプリング\n\t_img = np.zeros((H+2, W+2, C), dtype=np.float32)\n\t_img[1:H+1, 1:W+1] = img\n\n\t# get new image shape\n\tH_new = np.round(H * d).astype(np.int)\n\tW_new = np.round(W * a).astype(np.int)\n\tout = np.zeros((H_new+1, W_new+1, C), dtype=np.float32)\n\n\t# get position of new image\n\tx_new = np.tile(np.arange(W_new), (H_new, 1))\n\ty_new = np.arange(H_new).repeat(W_new).reshape(H_new, -1)\n\n\t# get position of temporary image by affine\n\t# x, yの最後の+1は外周のオフセット分\n\tadbc = a * d - b * c\n\tx = np.round((d * x_new - b * y_new) / adbc).astype(np.int) - tx + 1\n\ty = np.round((-c * x_new + a * y_new) / adbc).astype(np.int) - ty + 1\n\n\tx = np.minimum(np.maximum(x, 0), W+1).astype(np.int)\n\ty = np.minimum(np.maximum(y, 0), H+1).astype(np.int)\n\n\t# assgin pixcel to new image\n\tout[y_new, x_new] = _img[y, x]\n\n\tout = out[:H_new, :W_new]\n\tout = out.astype(np.uint8)\n\n\treturn out\n\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\n\n# Affine\nout = affine(img, a=1.3, b=0, c=0, d=0.8, tx=30, ty=-30)\n\n\n# Save result\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\ncv2.imwrite(\"out.jpg\", out)\n"
] |
[
[
"numpy.round",
"numpy.arange",
"numpy.maximum",
"numpy.zeros"
]
] |
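The `answer_29.py` record implements affine scaling by inverse mapping: every output pixel looks up its nearest source pixel through the inverted transform. A stripped-down numpy sketch of the same idea without the translation and edge-padding details (the scale factors 1.3 and 0.8 mirror the answer; the random image is a placeholder):

import numpy as np

def affine_scale(img, ax=1.3, dy=0.8):
    # Inverse mapping with nearest-neighbour lookup, no shear terms.
    H, W = img.shape[:2]
    H_new, W_new = int(np.round(H * dy)), int(np.round(W * ax))
    x_new = np.tile(np.arange(W_new), (H_new, 1))
    y_new = np.arange(H_new).repeat(W_new).reshape(H_new, -1)
    x = np.clip(np.round(x_new / ax).astype(int), 0, W - 1)
    y = np.clip(np.round(y_new / dy).astype(int), 0, H - 1)
    return img[y, x]

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
print(affine_scale(img).shape)  # (51, 83, 3)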
hyeonLewis/MeshTransformer
|
[
"1daf18cd97896656e84c3634bc0ab91be38981c0"
] |
[
"metro/utils/renderer.py"
] |
[
"\"\"\"\nRendering tools for 3D mesh visualization on 2D image.\n\nParts of the code are taken from https://github.com/akanazawa/hmr\n\nModified by hyeonwoo Kim, openDR to pytorch3d\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport code\nimport os\nimport sys\nimport torch\nimport pytorch3d\nimport random\n# Data structures and functions for rendering\nfrom pytorch3d.transforms import (\n Transform3d\n)\nfrom pytorch3d.structures import Meshes\nfrom pytorch3d.renderer import (\n look_at_view_transform,\n FoVPerspectiveCameras, \n PerspectiveCameras,\n PointLights, \n Materials, \n RasterizationSettings, \n MeshRenderer, \n MeshRasterizer, \n SoftPhongShader,\n TexturesUV,\n TexturesVertex\n)\nos.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\nsys.path.append(os.path.abspath(''))\n\n# Rotate the points by a specified angle.\ndef rotateY(points, angle):\n ry = np.array([\n [np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],\n [-np.sin(angle), 0., np.cos(angle)]\n ])\n return np.dot(points, ry)\n\ndef draw_skeleton(input_image, joints, draw_edges=True, vis=None, radius=None):\n \"\"\"\n joints is 3 x 19. but if not will transpose it.\n 0: Right ankle\n 1: Right knee\n 2: Right hip\n 3: Left hip\n 4: Left knee\n 5: Left ankle\n 6: Right wrist\n 7: Right elbow\n 8: Right shoulder\n 9: Left shoulder\n 10: Left elbow\n 11: Left wrist\n 12: Neck\n 13: Head top\n 14: nose\n 15: left_eye\n 16: right_eye\n 17: left_ear\n 18: right_ear\n \"\"\"\n\n if radius is None:\n radius = max(4, (np.mean(input_image.shape[:2]) * 0.01).astype(int))\n\n colors = {\n 'pink': (197, 27, 125), # L lower leg\n 'light_pink': (233, 163, 201), # L upper leg\n 'light_green': (161, 215, 106), # L lower arm\n 'green': (77, 146, 33), # L upper arm\n 'red': (215, 48, 39), # head\n 'light_red': (252, 146, 114), # head\n 'light_orange': (252, 141, 89), # chest\n 'purple': (118, 42, 131), # R lower leg\n 'light_purple': (175, 141, 195), # R upper\n 'light_blue': (145, 191, 219), # R lower arm\n 'blue': (69, 117, 180), # R upper arm\n 'gray': (130, 130, 130), #\n 'white': (255, 255, 255), #\n }\n\n image = input_image.copy()\n input_is_float = False\n\n if np.issubdtype(image.dtype, np.float):\n input_is_float = True\n max_val = image.max()\n if max_val <= 2.: # should be 1 but sometimes it's slightly above 1\n image = (image * 255).astype(np.uint8)\n else:\n image = (image).astype(np.uint8)\n\n if joints.shape[0] != 2:\n joints = joints.T\n joints = np.round(joints).astype(int)\n\n jcolors = [\n 'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',\n 'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white'\n ]\n\n if joints.shape[1] == 19:\n # parent indices -1 means no parents\n parents = np.array([\n 1, 2, 8, 9, 3, 4, 7, 8, 12, 12, 9, 10, 14, -1, 13, -1, -1, 15, 16\n ])\n # Left is light and right is dark\n ecolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 8: 'light_blue',\n 9: 'blue',\n 10: 'blue',\n 11: 'blue',\n 12: 'purple',\n 17: 'light_green',\n 18: 'light_green',\n 14: 'purple'\n }\n elif joints.shape[1] == 14:\n parents = np.array([\n 1,\n 2,\n 8,\n 9,\n 3,\n 4,\n 7,\n 8,\n -1,\n -1,\n 9,\n 10,\n 13,\n -1,\n ])\n ecolors = {\n 0: 'light_pink',\n 1: 
'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 10: 'light_blue',\n 11: 'blue',\n 12: 'purple'\n }\n elif joints.shape[1] == 21: # hand\n parents = np.array([\n -1,\n 0,\n 1,\n 2,\n 3,\n 0,\n 5,\n 6,\n 7,\n 0,\n 9,\n 10,\n 11,\n 0,\n 13,\n 14,\n 15,\n 0,\n 17,\n 18,\n 19,\n ])\n ecolors = {\n 0: 'light_purple',\n 1: 'light_green',\n 2: 'light_green',\n 3: 'light_green',\n 4: 'light_green',\n 5: 'pink',\n 6: 'pink',\n 7: 'pink',\n 8: 'pink',\n 9: 'light_blue',\n 10: 'light_blue',\n 11: 'light_blue',\n 12: 'light_blue',\n 13: 'light_red',\n 14: 'light_red',\n 15: 'light_red',\n 16: 'light_red',\n 17: 'purple',\n 18: 'purple',\n 19: 'purple',\n 20: 'purple',\n }\n else:\n print('Unknown skeleton!!')\n\n for child in range(len(parents)):\n point = joints[:, child]\n # If invisible skip\n if vis is not None and vis[child] == 0:\n continue\n if draw_edges:\n cv2.circle(image, (point[0], point[1]), radius, colors['white'],\n -1)\n cv2.circle(image, (point[0], point[1]), radius - 1,\n colors[jcolors[child]], -1)\n else:\n # cv2.circle(image, (point[0], point[1]), 5, colors['white'], 1)\n cv2.circle(image, (point[0], point[1]), radius - 1,\n colors[jcolors[child]], 1)\n # cv2.circle(image, (point[0], point[1]), 5, colors['gray'], -1)\n pa_id = parents[child]\n if draw_edges and pa_id >= 0:\n if vis is not None and vis[pa_id] == 0:\n continue\n point_pa = joints[:, pa_id]\n cv2.circle(image, (point_pa[0], point_pa[1]), radius - 1,\n colors[jcolors[pa_id]], -1)\n if child not in ecolors.keys():\n print('bad')\n import ipdb\n ipdb.set_trace()\n cv2.line(image, (point[0], point[1]), (point_pa[0], point_pa[1]),\n colors[ecolors[child]], radius - 2)\n\n # Convert back in original dtype\n if input_is_float:\n if max_val <= 1.:\n image = image.astype(np.float32) / 255.\n else:\n image = image.astype(np.float32)\n\n return image\n\ndef draw_text(input_image, content):\n \"\"\"\n content is a dict. 
draws key: val on image\n Assumes key is str, val is float\n \"\"\"\n image = input_image.copy()\n input_is_float = False\n if np.issubdtype(image.dtype, np.float):\n input_is_float = True\n image = (image * 255).astype(np.uint8)\n\n black = (255, 255, 0)\n margin = 15\n start_x = 5\n start_y = margin\n for key in sorted(content.keys()):\n text = \"%s: %.2g\" % (key, content[key])\n cv2.putText(image, text, (start_x, start_y), 0, 0.45, black)\n start_y += margin\n\n if input_is_float:\n image = image.astype(np.float32) / 255.\n return image\n\ndef visualize_reconstruction(img, img_size, gt_kp, vertices, pred_kp, camera, renderer, color='pink', focal_length=1000):\n \"\"\"Overlays gt_kp and pred_kp on img.\n Draws vert with text.\n Renderer is an instance of SMPLRenderer.\n camera size = [B, 3]\n \"\"\"\n gt_vis = gt_kp[:, 2].astype(bool)\n loss = np.sum((gt_kp[gt_vis, :2] - pred_kp[gt_vis])**2)\n debug_text = {\"sc\": camera[0], \"tx\": camera[1], \"ty\": camera[2], \"kpl\": loss}\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)]) #camera_translation\n \n rend_img = renderer.visualize_mesh(vertices, camera_t, img) #Composed with pytorch3d\n\n rend_img = draw_text(rend_img, debug_text)\n\n # Draw skeleton\n gt_joint = ((gt_kp[:, :2] + 1) * 0.5) * img_size\n pred_joint = ((pred_kp + 1) * 0.5) * img_size\n img_with_gt = draw_skeleton( img, gt_joint, draw_edges=False, vis=gt_vis)\n skel_img = draw_skeleton(img_with_gt, pred_joint)\n\n combined = np.hstack([skel_img, rend_img])\n\n return combined\n\ndef visualize_reconstruction_test(img, img_size, gt_kp, vertices, pred_kp, camera, renderer, score, color='pink', focal_length=1000):\n \"\"\"Overlays gt_kp and pred_kp on img.\n Draws vert with text.\n Renderer is an instance of SMPLRenderer.\n \"\"\"\n gt_vis = gt_kp[:, 2].astype(bool)\n loss = np.sum((gt_kp[gt_vis, :2] - pred_kp[gt_vis])**2)\n debug_text = {\"sc\": camera[0], \"tx\": camera[1], \"ty\": camera[2], \"kpl\": loss, \"pa-mpjpe\": score*1000}\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n\n rend_img = renderer.visualize_mesh(vertices, camera_t, img)#Composed with pytorch3d\n\n rend_img = draw_text(rend_img, debug_text)\n\n # Draw skeleton\n gt_joint = ((gt_kp[:, :2] + 1) * 0.5) * img_size\n pred_joint = ((pred_kp + 1) * 0.5) * img_size\n img_with_gt = draw_skeleton( img, gt_joint, draw_edges=False, vis=gt_vis)\n skel_img = draw_skeleton(img_with_gt, pred_joint)\n\n combined = np.hstack([skel_img, rend_img])\n\n return combined\n\n\n\ndef visualize_reconstruction_and_att(img, img_size, vertices_full, vertices, vertices_2d, camera, renderer, ref_points, attention, focal_length=1000):\n \"\"\"Overlays gt_kp and pred_kp on img.\n Draws vert with text.\n Renderer is an instance of SMPLRenderer.\n \"\"\"\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n\n rend_img = renderer.visualize_mesh(vertices_full, camera_t, img)#Composed with pytorch3d\n\n heads_num, vertex_num, _ = attention.shape\n\n all_head = np.zeros((vertex_num,vertex_num))\n\n ###### find max\n # for i in range(vertex_num):\n # for j in range(vertex_num):\n # all_head[i,j] = np.max(attention[:,i,j])\n\n ##### find avg\n for h in range(4):\n att_per_img = 
attention[h]\n all_head = all_head + att_per_img \n all_head = all_head/4\n\n col_sums = all_head.sum(axis=0)\n all_head = all_head / col_sums[np.newaxis, :]\n\n\n # code.interact(local=locals())\n\n combined = []\n if vertex_num>400: # body\n selected_joints = [6,7,4,5,13] # [6,7,4,5,13,12] \n else: # hand \n selected_joints = [0, 4, 8, 12, 16, 20]\n # Draw attention\n for ii in range(len(selected_joints)):\n reference_id = selected_joints[ii]\n ref_point = ref_points[reference_id]\n attention_to_show = all_head[reference_id][14::] \n min_v = np.min(attention_to_show)\n max_v = np.max(attention_to_show)\n norm_attention_to_show = (attention_to_show - min_v)/(max_v-min_v)\n\n vertices_norm = ((vertices_2d + 1) * 0.5) * img_size\n ref_norm = ((ref_point + 1) * 0.5) * img_size\n image = np.zeros_like(rend_img)\n\n for jj in range(vertices_norm.shape[0]):\n x = int(vertices_norm[jj,0]) \n y = int(vertices_norm[jj,1])\n cv2.circle(image,(x,y), 1, (255,255,255), -1) \n\n total_to_draw = []\n for jj in range(vertices_norm.shape[0]):\n thres = 0.0\n if norm_attention_to_show[jj]>thres:\n things = [norm_attention_to_show[jj], ref_norm, vertices_norm[jj]]\n total_to_draw.append(things)\n # plot_one_line(ref_norm, vertices_norm[jj], image, reference_id, alpha=0.4*(norm_attention_to_show[jj]-thres)/(1-thres) )\n total_to_draw.sort()\n max_att_score = total_to_draw[-1][0]\n for item in total_to_draw:\n attention_score = item[0]\n ref_point = item[1]\n vertex = item[2]\n plot_one_line(ref_point, vertex, image, ii, alpha=(attention_score-thres)/(max_att_score-thres) )\n # code.interact(local=locals())\n if len(combined)==0:\n combined = image\n else:\n combined = np.hstack([combined, image])\n \n\n final = np.hstack([img, combined, rend_img])\n\n return final\n\n\ndef visualize_reconstruction_and_att_local(img, img_size, vertices_full, vertices, vertices_2d, camera, renderer, ref_points, attention, color='light_blue', focal_length=1000):\n \"\"\"Overlays gt_kp and pred_kp on img.\n Draws vert with text.\n Renderer is an instance of SMPLRenderer.\n \"\"\"\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n\n rend_img = renderer.visualize_mesh(vertices_full, camera_t, img) #Composed with pytorch3d\n\n heads_num, vertex_num, _ = attention.shape\n all_head = np.zeros((vertex_num,vertex_num))\n\n ##### compute avg attention for 4 attention heads\n for h in range(4):\n att_per_img = attention[h]\n all_head = all_head + att_per_img \n all_head = all_head/4\n\n col_sums = all_head.sum(axis=0)\n all_head = all_head / col_sums[np.newaxis, :]\n\n combined = []\n if vertex_num>400: # body\n selected_joints = [7] # [6,7,4,5,13,12] \n else: # hand \n selected_joints = [0] # [0, 4, 8, 12, 16, 20] \n # Draw attention\n for ii in range(len(selected_joints)):\n reference_id = selected_joints[ii]\n ref_point = ref_points[reference_id]\n attention_to_show = all_head[reference_id][14::] \n min_v = np.min(attention_to_show)\n max_v = np.max(attention_to_show)\n norm_attention_to_show = (attention_to_show - min_v)/(max_v-min_v)\n vertices_norm = ((vertices_2d + 1) * 0.5) * img_size\n ref_norm = ((ref_point + 1) * 0.5) * img_size\n image = rend_img*0.4\n\n total_to_draw = []\n for jj in range(vertices_norm.shape[0]):\n thres = 0.0\n if norm_attention_to_show[jj]>thres:\n things = [norm_attention_to_show[jj], ref_norm, vertices_norm[jj]]\n total_to_draw.append(things)\n total_to_draw.sort()\n max_att_score = 
total_to_draw[-1][0]\n for item in total_to_draw:\n attention_score = item[0]\n ref_point = item[1]\n vertex = item[2]\n plot_one_line(ref_point, vertex, image, ii, alpha=(attention_score-thres)/(max_att_score-thres) )\n\n for jj in range(vertices_norm.shape[0]):\n x = int(vertices_norm[jj,0])\n y = int(vertices_norm[jj,1])\n cv2.circle(image,(x,y), 1, (255,255,255), -1) \n\n if len(combined)==0:\n combined = image\n else:\n combined = np.hstack([combined, image])\n\n final = np.hstack([img, combined, rend_img])\n\n return final\n\n\ndef visualize_reconstruction_no_text(img, img_size, vertices, camera, renderer, color='pink', focal_length=1000):\n \"\"\"Overlays gt_kp and pred_kp on img.\n Draws vert with text.\n Renderer is an instance of SMPLRenderer.\n \"\"\"\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n\n rend_img = renderer.visualize_mesh(vertices, camera_t, img) #Composed with pytorch3d\n\n combined = np.hstack([img, rend_img])\n\n return combined\n\n\ndef plot_one_line(ref, vertex, img, color_index, alpha=0.0, line_thickness=None):\n # 13,6,7,8,3,4,5\n # att_colors = [(255, 221, 104), (255, 255, 0), (255, 215, 227), (210, 240, 119), \\\n # (209, 238, 245), (244, 200, 243), (233, 242, 216)] \n att_colors = [(255, 255, 0), (244, 200, 243), (210, 243, 119), (209, 238, 255), (200, 208, 255), (250, 238, 215)] \n\n\n overlay = img.copy()\n # output = img.copy()\n # Plots one bounding box on image img\n tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\n\n color = list(att_colors[color_index])\n c1, c2 = (int(ref[0]), int(ref[1])), (int(vertex[0]), int(vertex[1]))\n cv2.line(overlay, c1, c2, (alpha*float(color[0])/255,alpha*float(color[1])/255,alpha*float(color[2])/255) , thickness=tl, lineType=cv2.LINE_AA)\n cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0, img)\n\n\n\ndef cam2pixel(cam_coord, f, c):\n x = cam_coord[:, 0] / (cam_coord[:, 2]) * f[0] + c[0]\n y = cam_coord[:, 1] / (cam_coord[:, 2]) * f[1] + c[1]\n z = cam_coord[:, 2]\n img_coord = np.concatenate((x[:,None], y[:,None], z[:,None]),1)\n return img_coord\n\nclass Renderer(object):\n\n def __init__(self, focal_length = 1000, img_res = 224, *args, faces):\n self.focal_length = ((focal_length, focal_length),)\n self.camera_center = ((img_res // 2 , img_res // 2),)\n self.img_res = img_res\n self.faces = faces\n\n def visualize_mesh(self, vertices, camera_t, img):\n device = torch.device('cuda:0')\n rend_img = self.__call__(vertices, camera_t, img, device).float() #returns [1, 224, 224, 4]\n rend_img = rend_img[0, ... 
, :3] #[224, 224, 3]\n rend_img_cpu = rend_img.cpu()\n rend_img = (cv2.rotate(rend_img_cpu.numpy(), cv2.ROTATE_180))\n rend_img = self.overlay_img(rend_img, img)\n\n return rend_img\n \n #Added function - Blends original image and rendered mesh\n def overlay_img(self, rend_img, img):\n mask = (rend_img == 1)[:,:,:,None]\n mask = torch.from_numpy(mask).squeeze()\n mask = mask.cpu().numpy()\n output = rend_img[:,:,:3] * ~mask + mask * img\n\n return output\n\n def __call__(self, vertices, camera_t, img, device):\n R, T = torch.from_numpy(np.eye(3)).unsqueeze(dim = 0), torch.from_numpy(camera_t).unsqueeze(dim = 0)\n\n cameras = PerspectiveCameras(device = device,focal_length=self.focal_length, principal_point = self.camera_center, R = R, T = T, image_size = ((self.img_res, self.img_res),), in_ndc=False)\n\n raster_settings = RasterizationSettings(\n image_size = self.img_res,\n blur_radius = 0.0)\n\n #lights = PointLights(device=device, location=[[2, 8, 2.1]])\n lights = PointLights(device=device, location=[[5, 5, -5]])\n renderer = MeshRenderer(\n rasterizer = MeshRasterizer(\n cameras = cameras,\n raster_settings=raster_settings\n ),\n shader=SoftPhongShader( #change the shader\n device = device,\n cameras=cameras,\n lights=lights\n )\n )\n\n #Set reflected light color and shininess of mesh\n '''\n materials = Materials(\n device = device,\n specular_color=[[0.0, 1.0, 0.0]],\n shininess=10.0\n )\n '''\n #r, g, b = 1, 192/255.0, 203/255.0 #For pink mesh\n\n vertices = torch.from_numpy(vertices).to(device)\n verts_rgb = torch.ones_like(vertices)[None]\n #verts_rgb[:, :, 1] = g\n #verts_rgb[:, :, 2] = b\n textures = TexturesVertex(verts_features=verts_rgb.to(device)) #part segmentation\n\n vertices = vertices.reshape(-1, vertices.shape[0], vertices.shape[1])\n faces = torch.from_numpy(self.faces.astype(np.float32))\n faces = faces.unsqueeze(dim = 0).to(device)\n \n mesh = Meshes(\n verts = vertices,\n faces = faces,\n textures = textures\n )\n return renderer(mesh, cameras = cameras, lights = lights)\n"
] |
[
[
"numpy.dot",
"numpy.hstack",
"torch.ones_like",
"numpy.min",
"numpy.issubdtype",
"numpy.eye",
"numpy.cos",
"torch.from_numpy",
"numpy.sin",
"numpy.concatenate",
"numpy.max",
"numpy.round",
"numpy.zeros_like",
"numpy.mean",
"torch.device",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
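The visualization helpers in the `renderer.py` record convert a weak-perspective camera `[s, tx, ty]` into a translation whose depth makes `focal_length / z` match the predicted scale, and `cam2pixel` performs the pinhole projection. A hedged numpy sketch of both conversions; the focal length, principal point and test points are placeholder values, not constants from the repo:

import numpy as np

def weak_persp_to_translation(camera, img_res=224, focal_length=1000):
    # camera = [s, tx, ty]; depth chosen so focal_length / z ~ img_res * s / 2.
    s, tx, ty = camera
    return np.array([tx, ty, 2 * focal_length / (img_res * s + 1e-9)])

def cam2pixel(cam_coord, f=(1000, 1000), c=(112, 112)):
    # Pinhole projection of N x 3 camera-space points to pixel coordinates.
    x = cam_coord[:, 0] / cam_coord[:, 2] * f[0] + c[0]
    y = cam_coord[:, 1] / cam_coord[:, 2] * f[1] + c[1]
    return np.stack([x, y, cam_coord[:, 2]], axis=1)

t = weak_persp_to_translation([0.9, 0.01, -0.02])
pts = np.array([[0.0, 0.0, 0.0], [0.1, 0.1, 0.0]]) + t
print(cam2pixel(pts))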
skybigzhou/Deep-Person-ReId
|
[
"8e42089163736ee8c92cd8b2ffcd509e69cf1a73"
] |
[
"utils.py"
] |
[
"from __future__ import absolute_import\nimport os\nimport sys\nimport errno\nimport shutil\nimport json\nimport os.path as osp\n\nimport torch\n\ndef mkdir_if_missing(directory):\n if not osp.exists(directory):\n try:\n os.makedirs(directory)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value.\n \n Code imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262\n \"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):\n mkdir_if_missing(osp.dirname(fpath))\n torch.save(state, fpath)\n if is_best:\n shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))\n\nclass Logger(object):\n \"\"\"\n Write console output to external text file.\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.\n \"\"\"\n def __init__(self, fpath=None):\n self.console = sys.stdout\n self.file = None\n if fpath is not None:\n mkdir_if_missing(os.path.dirname(fpath))\n self.file = open(fpath, 'w')\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n pass\n\n def __exit__(self, *args):\n self.close()\n\n def write(self, msg):\n self.console.write(msg)\n if self.file is not None:\n self.file.write(msg)\n\n def flush(self):\n self.console.flush()\n if self.file is not None:\n self.file.flush()\n os.fsync(self.file.fileno())\n\n def close(self):\n self.console.close()\n if self.file is not None:\n self.file.close()\n\ndef read_json(fpath):\n with open(fpath, 'r') as f:\n obj = json.load(f)\n return obj\n\ndef write_json(obj, fpath):\n mkdir_if_missing(osp.dirname(fpath))\n with open(fpath, 'w') as f:\n json.dump(obj, f, indent=4, separators=(',', ': '))"
] |
[
[
"torch.save"
]
] |
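The `utils.py` record's `AverageMeter` keeps a running, count-weighted average, which is useful for per-batch losses of unequal batch size. A minimal reimplementation with a usage example (the loss/batch-size pairs are invented):

class AverageMeter:
    # Same running-average bookkeeping as the record's utils.AverageMeter.
    def __init__(self):
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

losses = AverageMeter()
for batch_loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
    losses.update(batch_loss, n=batch_size)
print(round(losses.avg, 4))  # 0.74, weighted by batch size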
chenmingjian/HigherHRNet-Human-Pose-Estimation
|
[
"6237e9c75bb08d8aa50a8e7f93e65e310d218a68"
] |
[
"tools/valid.py"
] |
[
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Bowen Cheng ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport pprint\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms\nimport torch.multiprocessing\nfrom tqdm import tqdm\n\nimport _init_paths\nimport models\n\nfrom config import cfg\nfrom config import check_config\nfrom config import update_config\nfrom core.inference import get_multi_stage_outputs\nfrom core.inference import aggregate_results\nfrom core.group import HeatmapParser\nfrom dataset import make_test_dataloader\nfrom fp16_utils.fp16util import network_to_half\nfrom utils.utils import create_logger\nfrom utils.utils import get_model_summary\nfrom utils.vis import save_debug_images\nfrom utils.vis import save_valid_image\nfrom utils.transforms import resize_align_multi_scale\nfrom utils.transforms import get_final_preds\nfrom utils.transforms import get_multi_scale_size\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\nimport sys\nsys.path.append(\"/home/chen/workshop/thesis/HRNet-Bottom-Up-Pose-Estimation/lib/core\")\nfrom rescore import rescore_valid\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Test keypoints network')\n # general\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n args = parser.parse_args()\n\n return args\n\n\n# markdown format output\ndef _print_name_value(logger, name_value, full_arch_name):\n names = name_value.keys()\n values = name_value.values()\n num_values = len(name_value)\n logger.info(\n '| Arch ' +\n ' '.join(['| {}'.format(name) for name in names]) +\n ' |'\n )\n logger.info('|---' * (num_values+1) + '|')\n\n if len(full_arch_name) > 15:\n full_arch_name = full_arch_name[:8] + '...'\n logger.info(\n '| ' + full_arch_name + ' ' +\n ' '.join(['| {:.3f}'.format(value) for value in values]) +\n ' |'\n )\n\n\ndef main():\n args = parse_args()\n update_config(cfg, args)\n check_config(cfg)\n\n logger, final_output_dir, tb_log_dir = create_logger(\n cfg, args.cfg, 'valid'\n )\n\n logger.info(pprint.pformat(args))\n logger.info(cfg)\n\n # cudnn related setting\n cudnn.benchmark = cfg.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED\n\n model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(\n cfg, is_train=False\n )\n\n dump_input = torch.rand(\n (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE)\n )\n logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))\n\n if cfg.FP16.ENABLED:\n model = network_to_half(model)\n\n if cfg.TEST.MODEL_FILE:\n logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))\n model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)\n else:\n model_state_file = os.path.join(\n final_output_dir, 'model_best.pth.tar'\n )\n logger.info('=> loading model from {}'.format(model_state_file))\n 
model.load_state_dict(torch.load(model_state_file))\n\n model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()\n model.eval()\n\n data_loader, test_dataset = make_test_dataloader(cfg)\n\n if cfg.MODEL.NAME == 'pose_hourglass':\n transforms = torchvision.transforms.Compose(\n [\n torchvision.transforms.ToTensor(),\n ]\n )\n else:\n transforms = torchvision.transforms.Compose(\n [\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )\n ]\n )\n\n parser = HeatmapParser(cfg)\n all_preds = []\n all_scores = []\n\n # pbar = tqdm(total=len(test_dataset)) if cfg.TEST.LOG_PROGRESS else None\n pbar = tqdm(total=len(test_dataset))\n for i, (images, annos) in enumerate(data_loader):\n assert 1 == images.size(0), 'Test batch size should be 1'\n \n image = images[0].cpu().numpy()\n # size at scale 1.0\n base_size, center, scale = get_multi_scale_size(\n image, cfg.DATASET.INPUT_SIZE, 1.0, min(cfg.TEST.SCALE_FACTOR)\n )\n\n with torch.no_grad():\n final_heatmaps = None\n tags_list = []\n for idx, s in enumerate(sorted(cfg.TEST.SCALE_FACTOR, reverse=True)):\n input_size = cfg.DATASET.INPUT_SIZE\n image_resized, center, scale = resize_align_multi_scale(\n image, input_size, s, min(cfg.TEST.SCALE_FACTOR)\n )\n image_resized = transforms(image_resized)\n image_resized = image_resized.unsqueeze(0).cuda()\n\n outputs, heatmaps, tags = get_multi_stage_outputs(\n cfg, model, image_resized, cfg.TEST.FLIP_TEST,\n cfg.TEST.PROJECT2IMAGE, base_size\n )\n\n final_heatmaps, tags_list = aggregate_results(\n cfg, s, final_heatmaps, tags_list, heatmaps, tags\n )\n\n final_heatmaps = final_heatmaps / float(len(cfg.TEST.SCALE_FACTOR))\n tags = torch.cat(tags_list, dim=4)\n grouped, scores = parser.parse(\n final_heatmaps, tags, cfg.TEST.ADJUST, cfg.TEST.REFINE\n )\n\n final_results = get_final_preds(\n grouped, center, scale,\n [final_heatmaps.size(3), final_heatmaps.size(2)]\n )\n if cfg.RESCORE.USE:\n try:\n scores = rescore_valid(cfg, final_results, scores)\n except:\n print(\"got one.\")\n # if cfg.TEST.LOG_PROGRESS:\n # pbar.update()\n pbar.update()\n\n if i % cfg.PRINT_FREQ == 0:\n prefix = '{}_{}'.format(os.path.join(final_output_dir, 'result_valid'), i)\n # logger.info('=> write {}'.format(prefix))\n save_valid_image(image, final_results, '{}.jpg'.format(prefix), dataset=test_dataset.name)\n # for scale_idx in range(len(outputs)):\n # prefix_scale = prefix + '_output_{}'.format(\n # # cfg.DATASET.OUTPUT_SIZE[scale_idx]\n # scale_idx\n # )\n # save_debug_images(\n # cfg, images, None, None,\n # outputs[scale_idx], prefix_scale\n # )\n all_preds.append(final_results)\n all_scores.append(scores)\n\n if cfg.TEST.LOG_PROGRESS:\n pbar.close()\n\n name_values, _ = test_dataset.evaluate(\n cfg, all_preds, all_scores, final_output_dir\n )\n\n if isinstance(name_values, list):\n for name_value in name_values:\n _print_name_value(logger, name_value, cfg.MODEL.NAME)\n else:\n _print_name_value(logger, name_values, cfg.MODEL.NAME)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.load",
"torch.cat",
"torch.no_grad",
"torch.rand",
"torch.nn.DataParallel",
"torch.multiprocessing.set_sharing_strategy"
]
] |
hakyuz16/TrendyolBootCamp
|
[
"4986c7fc061c8a95a1e4484a9027351b2ad142fa"
] |
[
"hw2/reco_sys_with_reg.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 5 15:38:29 2022\n\n@author: humeyraakyuz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import trange\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\n\ndef root_mean_squared_error(actual, predictions):\n return np.sqrt(mean_squared_error(actual, predictions))\n\n\n\nclass recoSysWithReg:\n b_user: np.ndarray\n b_item: np.ndarray\n objective_values: np.ndarray\n err_values: np.ndarray\n def __init__(self , alpha: any = 0.01, iteration : any = 2000, lam: any = 0.5):\n \"\"\"\n :param lam:\n :param alpha: \n :param iteration:\n :param sigma:\n \"\"\"\n self.lam = lam\n self.alpha = alpha\n self.iteration = iteration\n \n\n def fit(self, r: np.ndarray):\n m, n = r.shape\n self.objective_values=[]\n self.err_values=[]\n self.b_user= np.random.random((m))\n self.b_item= np.random.random((n))\n irow,icol=np.where(~np.isnan(r))\n \n for i in trange(self.iteration):\n g_b_user = np.zeros(m)\n g_b_item = np.zeros(n)\n \n objective = 0\n err = 0\n for user in range(m):\n g_b_i = 0\n idx = np.where(~np.isnan(r[user]))\n y_i = r[user,idx]\n y_pred_b_i: np.ndarray = self.b_user[user] + self.b_item[idx]\n objective += np.sum((y_i - y_pred_b_i)**2) + self.lam*(self.b_user[user]**2)/2\n err_i = np.sum(y_i - y_pred_b_i)\n \n g_b_i = g_b_i + 2 * err_i \n g_b_i = (g_b_i / len(idx)) + (self.lam * self.b_user[user])\n g_b_user[user] = g_b_user[user] - self.alpha * g_b_i\n \n err += err_i\n \n for item in range(n):\n g_b_j = 0\n idx = np.where(~np.isnan(r[:,item]))\n y_j = r[idx,item]\n y_pred_b_j: np.ndarray = self.b_user[idx] + self.b_item[item]\n objective += np.sum((y_j - y_pred_b_j)**2 ) + self.lam*(self.b_item[item]**2)/2\n err_j = np.sum(y_j - y_pred_b_j)\n g_b_j = g_b_j + 2 * err_j \n g_b_j = (g_b_j / len(idx)) + (self.lam* self.b_item[item])\n g_b_item[item] = g_b_item[item] - self.alpha * g_b_j\n err += err_j\n \n objective = (objective / len(irow)) \n self.objective_values.append(objective)\n self.err_values.append(err)\n \n self.b_user = self.b_user - self.alpha * g_b_user \n self.b_item = self.b_item - self.alpha * g_b_item \n #print(f\"obj: {objective} err: {err}\")\n \n # plot objective function during iterations\n plt.figure(figsize = (10, 6))\n plt.plot(range(1, self.iteration + 1), self.err_values, \"k-\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Error\")\n plt.show()\n \n return self.b_user,self.b_item\n\n def predict(self, r: np.array, user: int, item: int) -> np.ndarray:\n \"\"\"\n :param r: Rating matrix\n :param user: User u\n :param item: item j\n :return: Calculated Rating of user rating for item j\n \"\"\"\n\n score = self.b_user[user] + self.b_item[item]\n\n return score\n\n def predict1(self, r: np.array, test_irow: np.ndarray,test_jcol: np.ndarray ) -> float:\n \n err = []\n y_pred_list= []\n y_list = []\n for u, j in zip(test_irow, test_jcol):\n y_pred = self.b_user[u] + self.b_item[j]\n y = r[u, j]\n y_pred_list.append(y_pred)\n y_list.append(y)\n err.append((y_pred - y) ** 2)\n print(np.sum(err)/len(test_irow))\n \n\n return y_pred_list,y_list"
] |
[
[
"numpy.random.random",
"numpy.isnan",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] |
rollingman1/mmpose
|
[
"a5a0fe10dca4b848a10b85505d9ad3f091513a63"
] |
[
"mmaction/datasets/pipelines/pose_loading.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nimport copy as cp\nimport pickle\n\nimport numpy as np\nfrom mmcv.fileio import FileClient\nfrom scipy.stats import mode\n\nfrom ..builder import PIPELINES\nfrom .augmentations import Flip\n\n\[email protected]_module()\nclass UniformSampleFrames:\n \"\"\"Uniformly sample frames from the video.\n\n To sample an n-frame clip from the video. UniformSampleFrames basically\n divide the video into n segments of equal length and randomly sample one\n frame from each segment. To make the testing results reproducible, a\n random seed is set during testing, to make the sampling results\n deterministic.\n\n Required keys are \"total_frames\", \"start_index\" , added or modified keys\n are \"frame_inds\", \"clip_len\", \"frame_interval\" and \"num_clips\".\n\n Args:\n clip_len (int): Frames of each sampled output clip.\n num_clips (int): Number of clips to be sampled. Default: 1.\n test_mode (bool): Store True when building test or validation dataset.\n Default: False.\n seed (int): The random seed used during test time. Default: 255.\n \"\"\"\n\n def __init__(self, clip_len, num_clips=1, test_mode=False, seed=255):\n\n self.clip_len = clip_len\n self.num_clips = num_clips\n self.test_mode = test_mode\n self.seed = seed\n\n def _get_train_clips(self, num_frames, clip_len):\n \"\"\"Uniformly sample indices for training clips.\n\n Args:\n num_frames (int): The number of frames.\n clip_len (int): The length of the clip.\n \"\"\"\n\n assert self.num_clips == 1\n if num_frames < clip_len:\n start = np.random.randint(0, num_frames)\n inds = np.arange(start, start + clip_len)\n elif clip_len <= num_frames < 2 * clip_len:\n basic = np.arange(clip_len)\n inds = np.random.choice(\n clip_len + 1, num_frames - clip_len, replace=False)\n offset = np.zeros(clip_len + 1, dtype=np.int64)\n offset[inds] = 1\n offset = np.cumsum(offset)\n inds = basic + offset[:-1]\n else:\n bids = np.array(\n [i * num_frames // clip_len for i in range(clip_len + 1)])\n bsize = np.diff(bids)\n bst = bids[:clip_len]\n offset = np.random.randint(bsize)\n inds = bst + offset\n return inds\n\n def _get_test_clips(self, num_frames, clip_len):\n \"\"\"Uniformly sample indices for testing clips.\n\n Args:\n num_frames (int): The number of frames.\n clip_len (int): The length of the clip.\n \"\"\"\n\n np.random.seed(self.seed)\n if num_frames < clip_len:\n # Then we use a simple strategy\n if num_frames < self.num_clips:\n start_inds = list(range(self.num_clips))\n else:\n start_inds = [\n i * num_frames // self.num_clips\n for i in range(self.num_clips)\n ]\n inds = np.concatenate(\n [np.arange(i, i + clip_len) for i in start_inds])\n elif clip_len <= num_frames < clip_len * 2:\n all_inds = []\n for i in range(self.num_clips):\n basic = np.arange(clip_len)\n inds = np.random.choice(\n clip_len + 1, num_frames - clip_len, replace=False)\n offset = np.zeros(clip_len + 1, dtype=np.int64)\n offset[inds] = 1\n offset = np.cumsum(offset)\n inds = basic + offset[:-1]\n all_inds.append(inds)\n inds = np.concatenate(all_inds)\n else:\n bids = np.array(\n [i * num_frames // clip_len for i in range(clip_len + 1)])\n bsize = np.diff(bids)\n bst = bids[:clip_len]\n all_inds = []\n for i in range(self.num_clips):\n offset = np.random.randint(bsize)\n all_inds.append(bst + offset)\n inds = np.concatenate(all_inds)\n return inds\n\n def __call__(self, results):\n num_frames = results['total_frames']\n\n if self.test_mode:\n inds = self._get_test_clips(num_frames, self.clip_len)\n else:\n inds = 
self._get_train_clips(num_frames, self.clip_len)\n\n inds = np.mod(inds, num_frames)\n start_index = results['start_index']\n inds = inds + start_index\n\n results['frame_inds'] = inds.astype(np.int)\n results['clip_len'] = self.clip_len\n results['frame_interval'] = None\n results['num_clips'] = self.num_clips\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'clip_len={self.clip_len}, '\n f'num_clips={self.num_clips}, '\n f'test_mode={self.test_mode}, '\n f'seed={self.seed})')\n return repr_str\n\n\[email protected]_module()\nclass PoseDecode:\n \"\"\"Load and decode pose with given indices.\n\n Required keys are \"keypoint\", \"frame_inds\" (optional), \"keypoint_score\"\n (optional), added or modified keys are \"keypoint\", \"keypoint_score\" (if\n applicable).\n \"\"\"\n\n @staticmethod\n def _load_kp(kp, frame_inds):\n \"\"\"Load keypoints given frame indices.\n\n Args:\n kp (np.ndarray): The keypoint coordinates.\n frame_inds (np.ndarray): The frame indices.\n \"\"\"\n\n return [x[frame_inds].astype(np.float32) for x in kp]\n\n @staticmethod\n def _load_kpscore(kpscore, frame_inds):\n \"\"\"Load keypoint scores given frame indices.\n\n Args:\n kpscore (np.ndarray): The confidence scores of keypoints.\n frame_inds (np.ndarray): The frame indices.\n \"\"\"\n\n return [x[frame_inds].astype(np.float32) for x in kpscore]\n\n def __call__(self, results):\n\n if 'frame_inds' not in results:\n results['frame_inds'] = np.arange(results['total_frames'])\n\n if results['frame_inds'].ndim != 1:\n results['frame_inds'] = np.squeeze(results['frame_inds'])\n\n offset = results.get('offset', 0)\n frame_inds = results['frame_inds'] + offset\n\n if 'keypoint_score' in results:\n kpscore = results['keypoint_score']\n results['keypoint_score'] = kpscore[:,\n frame_inds].astype(np.float32)\n\n if 'keypoint' in results:\n results['keypoint'] = results['keypoint'][:, frame_inds].astype(\n np.float32)\n\n return results\n\n def __repr__(self):\n repr_str = f'{self.__class__.__name__}()'\n return repr_str\n\n\[email protected]_module()\nclass LoadKineticsPose:\n \"\"\"Load Kinetics Pose given filename (The format should be pickle)\n\n Required keys are \"filename\", \"total_frames\", \"img_shape\", \"frame_inds\",\n \"anno_inds\" (for mmpose source, optional), added or modified keys are\n \"keypoint\", \"keypoint_score\".\n\n Args:\n io_backend (str): IO backend where frames are stored. Default: 'disk'.\n squeeze (bool): Whether to remove frames with no human pose.\n Default: True.\n max_person (int): The max number of persons in a frame. Default: 10.\n keypoint_weight (dict): The weight of keypoints. We set the confidence\n score of a person as the weighted sum of confidence scores of each\n joint. Persons with low confidence scores are dropped (if exceed\n max_person). Default: dict(face=1, torso=2, limb=3).\n source (str): The sources of the keypoints used. Choices are 'mmpose'\n and 'openpose'. 
Default: 'mmpose'.\n kwargs (dict, optional): Arguments for FileClient.\n \"\"\"\n\n def __init__(self,\n io_backend='disk',\n squeeze=True,\n max_person=100,\n keypoint_weight=dict(face=1, torso=2, limb=3),\n source='mmpose',\n **kwargs):\n\n self.io_backend = io_backend\n self.squeeze = squeeze\n self.max_person = max_person\n self.keypoint_weight = cp.deepcopy(keypoint_weight)\n self.source = source\n\n if source == 'openpose':\n self.kpsubset = dict(\n face=[0, 14, 15, 16, 17],\n torso=[1, 2, 8, 5, 11],\n limb=[3, 4, 6, 7, 9, 10, 12, 13])\n elif source == 'mmpose':\n self.kpsubset = dict(\n face=[0, 1, 2, 3, 4],\n torso=[5, 6, 11, 12],\n limb=[7, 8, 9, 10, 13, 14, 15, 16])\n else:\n raise NotImplementedError('Unknown source of Kinetics Pose')\n\n self.kwargs = kwargs\n self.file_client = None\n\n def __call__(self, results):\n\n assert 'filename' in results\n filename = results.pop('filename')\n\n # only applicable to source == 'mmpose'\n anno_inds = None\n if 'anno_inds' in results:\n assert self.source == 'mmpose'\n anno_inds = results.pop('anno_inds')\n results.pop('box_score', None)\n\n if self.file_client is None:\n self.file_client = FileClient(self.io_backend, **self.kwargs)\n\n bytes = self.file_client.get(filename)\n\n # only the kp array is in the pickle file, each kp include x, y, score.\n kps = pickle.loads(bytes)\n\n total_frames = results['total_frames']\n\n frame_inds = results.pop('frame_inds')\n\n if anno_inds is not None:\n kps = kps[anno_inds]\n frame_inds = frame_inds[anno_inds]\n\n frame_inds = list(frame_inds)\n\n def mapinds(inds):\n uni = np.unique(inds)\n mapp = {x: i for i, x in enumerate(uni)}\n inds = [mapp[x] for x in inds]\n return np.array(inds, dtype=np.int16)\n\n if self.squeeze:\n frame_inds = mapinds(frame_inds)\n total_frames = np.max(frame_inds) + 1\n\n # write it back\n results['total_frames'] = total_frames\n\n h, w = results['img_shape']\n if self.source == 'openpose':\n kps[:, :, 0] *= w\n kps[:, :, 1] *= h\n\n num_kp = kps.shape[1]\n num_person = mode(frame_inds)[-1][0]\n\n new_kp = np.zeros([num_person, total_frames, num_kp, 2],\n dtype=np.float16)\n new_kpscore = np.zeros([num_person, total_frames, num_kp],\n dtype=np.float16)\n # 32768 is enough\n num_person_frame = np.zeros([total_frames], dtype=np.int16)\n\n for frame_ind, kp in zip(frame_inds, kps):\n person_ind = num_person_frame[frame_ind]\n new_kp[person_ind, frame_ind] = kp[:, :2]\n new_kpscore[person_ind, frame_ind] = kp[:, 2]\n num_person_frame[frame_ind] += 1\n\n kpgrp = self.kpsubset\n weight = self.keypoint_weight\n results['num_person'] = num_person\n\n if num_person > self.max_person:\n for i in range(total_frames):\n np_frame = num_person_frame[i]\n val = new_kpscore[:np_frame, i]\n\n val = (\n np.sum(val[:, kpgrp['face']], 1) * weight['face'] +\n np.sum(val[:, kpgrp['torso']], 1) * weight['torso'] +\n np.sum(val[:, kpgrp['limb']], 1) * weight['limb'])\n inds = sorted(range(np_frame), key=lambda x: -val[x])\n new_kpscore[:np_frame, i] = new_kpscore[inds, i]\n new_kp[:np_frame, i] = new_kp[inds, i]\n results['num_person'] = self.max_person\n\n results['keypoint'] = new_kp[:self.max_person]\n results['keypoint_score'] = new_kpscore[:self.max_person]\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'io_backend={self.io_backend}, '\n f'squeeze={self.squeeze}, '\n f'max_person={self.max_person}, '\n f'keypoint_weight={self.keypoint_weight}, '\n f'source={self.source}, '\n f'kwargs={self.kwargs})')\n return repr_str\n\n\[email 
protected]_module()\nclass GeneratePoseTarget:\n \"\"\"Generate pseudo heatmaps based on joint coordinates and confidence.\n\n Required keys are \"keypoint\", \"img_shape\", \"keypoint_score\" (optional),\n added or modified keys are \"imgs\".\n\n Args:\n sigma (float): The sigma of the generated gaussian map. Default: 0.6.\n use_score (bool): Use the confidence score of keypoints as the maximum\n of the gaussian maps. Default: True.\n with_kp (bool): Generate pseudo heatmaps for keypoints. Default: True.\n with_limb (bool): Generate pseudo heatmaps for limbs. At least one of\n 'with_kp' and 'with_limb' should be True. Default: False.\n skeletons (tuple[tuple]): The definition of human skeletons.\n Default: ((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7), (7, 9),\n (0, 6), (6, 8), (8, 10), (5, 11), (11, 13), (13, 15),\n (6, 12), (12, 14), (14, 16), (11, 12)),\n which is the definition of COCO-17p skeletons.\n double (bool): Output both original heatmaps and flipped heatmaps.\n Default: False.\n left_kp (tuple[int]): Indexes of left keypoints, which is used when\n flipping heatmaps. Default: (1, 3, 5, 7, 9, 11, 13, 15),\n which is left keypoints in COCO-17p.\n right_kp (tuple[int]): Indexes of right keypoints, which is used when\n flipping heatmaps. Default: (2, 4, 6, 8, 10, 12, 14, 16),\n which is right keypoints in COCO-17p.\n \"\"\"\n\n def __init__(self,\n sigma=0.6,\n use_score=True,\n with_kp=True,\n with_limb=False,\n skeletons=((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7),\n (7, 9), (0, 6), (6, 8), (8, 10), (5, 11), (11, 13),\n (13, 15), (6, 12), (12, 14), (14, 16), (11, 12)),\n double=False,\n left_kp=(1, 3, 5, 7, 9, 11, 13, 15),\n right_kp=(2, 4, 6, 8, 10, 12, 14, 16)):\n\n self.sigma = sigma\n self.use_score = use_score\n self.with_kp = with_kp\n self.with_limb = with_limb\n self.double = double\n\n # an auxiliary const\n self.eps = 1e-4\n\n assert self.with_kp or self.with_limb, (\n 'At least one of \"with_limb\" '\n 'and \"with_kp\" should be set as True.')\n self.left_kp = left_kp\n self.right_kp = right_kp\n self.skeletons = skeletons\n\n def generate_a_heatmap(self, img_h, img_w, centers, sigma, max_values):\n \"\"\"Generate pseudo heatmap for one keypoint in one frame.\n\n Args:\n img_h (int): The height of the heatmap.\n img_w (int): The width of the heatmap.\n centers (np.ndarray): The coordinates of corresponding keypoints\n (of multiple persons).\n sigma (float): The sigma of generated gaussian.\n max_values (np.ndarray): The max values of each keypoint.\n\n Returns:\n np.ndarray: The generated pseudo heatmap.\n \"\"\"\n\n heatmap = np.zeros([img_h, img_w], dtype=np.float32)\n # print('MMMMMMMMMMMM','centers')\n # print('MMMMMMMMMMMM', centers) # MMMMMMMMMMMM [[32.91428757 13.9761343 ]] [[센터 좌표]]\n # print('MMMMMMMMMMMM', 'max_values')\n # print('MMMMMMMMMMMM', max_values) # MMMMMMMMMMMM [0.88378906] 확률\n # print(self.eps) # 0.0001\n\n for center, max_value in zip(centers, max_values):\n mu_x, mu_y = center[0], center[1]\n if max_value < self.eps:\n continue\n\n st_x = max(int(mu_x - 3 * sigma), 0)\n ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)\n st_y = max(int(mu_y - 3 * sigma), 0)\n ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)\n x = np.arange(st_x, ed_x, 1, np.float32)\n y = np.arange(st_y, ed_y, 1, np.float32)\n\n # if the keypoint not in the heatmap coordinate system\n if not (len(x) and len(y)):\n continue\n y = y[:, None]\n\n patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)\n patch = patch * max_value\n heatmap[st_y:ed_y,\n st_x:ed_x] = 
np.maximum(heatmap[st_y:ed_y, st_x:ed_x],\n patch)\n\n return heatmap\n\n def generate_a_3d_heatmap(self, img_h, img_w, centers, sigma, max_values, img):\n \"\"\"Generate pseudo heatmap for one keypoint in one frame.\n\n Args:\n img_h (int): The height of the heatmap.\n img_w (int): The width of the heatmap.\n img: 이미지 경로\n centers (np.ndarray): The coordinates of corresponding keypoints\n (of multiple persons).\n sigma (float): The sigma of generated gaussian.\n max_values (np.ndarray): The max values of each keypoint.\n\n Returns:\n np.ndarray: The generated pseudo heatmap.\n \"\"\"\n\n heatmap = np.zeros([img_h, img_w, 3], dtype=np.float32)\n # print(\"MMMMMMMMMMMMmodel heatmap size: \", heatmap.shape)\n from PIL import Image\n # imgArray = np.array(Image.open(img).resize((64, 64)))\n imgArray = np.array(Image.open(img), dtype=np.float32) / 256\n\n # print('MMMMMMMMMMMM','centers')\n # print('MMMMMMMMMMMM', centers) # MMMMMMMMMMMM [[32.91428757 13.9761343 ]] [[센터 좌표]]\n # print('MMMMMMMMMMMM', 'max_values')\n # print('MMMMMMMMMMMM', max_values) # MMMMMMMMMMMM [0.88378906] 확률\n # print(self.eps) # 0.0001\n\n for center, max_value in zip(centers, max_values):\n mu_x, mu_y = center[0], center[1]\n if max_value < self.eps:\n continue\n\n st_x = max(int(mu_x - 3 * sigma), 0)\n ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)\n st_y = max(int(mu_y - 3 * sigma), 0)\n ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)\n x = np.arange(st_x, ed_x, 1, np.float32)\n y = np.arange(st_y, ed_y, 1, np.float32)\n\n # if the keypoint not in the heatmap coordinate system\n if not (len(x) and len(y)):\n continue\n y = y[:, None]\n\n patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)\n patch = patch * max_value\n #r\n heatmap[st_y:ed_y,\n st_x:ed_x, 0] = np.maximum(heatmap[st_y:ed_y, st_x:ed_x, 0],\n patch)\n #g\n heatmap[st_y:ed_y,\n st_x:ed_x, 1] = np.maximum(heatmap[st_y:ed_y, st_x:ed_x, 0],\n patch)\n #b\n heatmap[st_y:ed_y,\n st_x:ed_x, 2] = np.maximum(heatmap[st_y:ed_y, st_x:ed_x, 0],\n patch)\n\n return heatmap * imgArray\n\n def generate_a_limb_heatmap(self, img_h, img_w, starts, ends, sigma,\n start_values, end_values):\n \"\"\"Generate pseudo heatmap for one limb in one frame.\n\n Args:\n img_h (int): The height of the heatmap.\n img_w (int): The width of the heatmap.\n starts (np.ndarray): The coordinates of one keypoint in the\n corresponding limbs (of multiple persons).\n ends (np.ndarray): The coordinates of the other keypoint in the\n corresponding limbs (of multiple persons).\n sigma (float): The sigma of generated gaussian.\n start_values (np.ndarray): The max values of one keypoint in the\n corresponding limbs.\n end_values (np.ndarray): The max values of the other keypoint in\n the corresponding limbs.\n\n Returns:\n np.ndarray: The generated pseudo heatmap.\n \"\"\"\n\n heatmap = np.zeros([img_h, img_w], dtype=np.float32)\n\n for start, end, start_value, end_value in zip(starts, ends,\n start_values,\n end_values):\n value_coeff = min(start_value, end_value)\n if value_coeff < self.eps:\n continue\n\n min_x, max_x = min(start[0], end[0]), max(start[0], end[0])\n min_y, max_y = min(start[1], end[1]), max(start[1], end[1])\n\n min_x = max(int(min_x - 3 * sigma), 0)\n max_x = min(int(max_x + 3 * sigma) + 1, img_w)\n min_y = max(int(min_y - 3 * sigma), 0)\n max_y = min(int(max_y + 3 * sigma) + 1, img_h)\n\n x = np.arange(min_x, max_x, 1, np.float32)\n y = np.arange(min_y, max_y, 1, np.float32)\n\n if not (len(x) and len(y)):\n continue\n\n y = y[:, None]\n x_0 = np.zeros_like(x)\n y_0 = 
np.zeros_like(y)\n\n # distance to start keypoints\n d2_start = ((x - start[0])**2 + (y - start[1])**2)\n\n # distance to end keypoints\n d2_end = ((x - end[0])**2 + (y - end[1])**2)\n\n # the distance between start and end keypoints.\n d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2)\n\n if d2_ab < 1:\n full_map = self.generate_a_heatmap(img_h, img_w, [start],\n sigma, [start_value])\n heatmap = np.maximum(heatmap, full_map)\n continue\n\n coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab\n\n a_dominate = coeff <= 0\n b_dominate = coeff >= 1\n seg_dominate = 1 - a_dominate - b_dominate\n\n position = np.stack([x + y_0, y + x_0], axis=-1)\n projection = start + np.stack([coeff, coeff], axis=-1) * (\n end - start)\n d2_line = position - projection\n d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2\n d2_seg = (\n a_dominate * d2_start + b_dominate * d2_end +\n seg_dominate * d2_line)\n\n patch = np.exp(-d2_seg / 2. / sigma**2)\n patch = patch * value_coeff\n\n heatmap[min_y:max_y, min_x:max_x] = np.maximum(\n heatmap[min_y:max_y, min_x:max_x], patch)\n\n return heatmap\n\n def generate_heatmap(self, img_h, img_w, kps, sigma, max_values):\n \"\"\"Generate pseudo heatmap for all keypoints and limbs in one frame (if\n needed).\n\n Args:\n img_h (int): The height of the heatmap.\n img_w (int): The width of the heatmap.\n kps (np.ndarray): The coordinates of keypoints in this frame.\n sigma (float): The sigma of generated gaussian.\n max_values (np.ndarray): The confidence score of each keypoint.\n\n Returns:\n np.ndarray: The generated pseudo heatmap.\n \"\"\"\n\n heatmaps = []\n if self.with_kp:\n num_kp = kps.shape[1]\n # print('MMMMMMMMMM num_kp', num_kp)\n # print('MMMMMMMMMM max_values', max_values)\n\n for i in range(num_kp):\n # print(\"MMMMMMMMM\", 'kps one_videos', kps)\n heatmap = self.generate_a_heatmap(img_h, img_w, kps[:, i],\n sigma, max_values[:, i])\n heatmaps.append(heatmap)\n\n if self.with_limb:\n for limb in self.skeletons:\n start_idx, end_idx = limb\n starts = kps[:, start_idx]\n ends = kps[:, end_idx]\n\n start_values = max_values[:, start_idx]\n end_values = max_values[:, end_idx]\n heatmap = self.generate_a_limb_heatmap(img_h, img_w, starts,\n ends, sigma,\n start_values,\n end_values)\n heatmaps.append(heatmap)\n\n return np.stack(heatmaps, axis=-1)\n\n def generate_3d_heatmap(self, img_h, img_w, kps, sigma, max_values, img_path):\n \"\"\"Generate pseudo heatmap for all keypoints and limbs in one frame (if\n needed).\n\n Args:\n img_h (int): The height of the heatmap.\n img_w (int): The width of the heatmap.\n kps (np.ndarray): The coordinates of keypoints in this frame.\n sigma (float): The sigma of generated gaussian.\n max_values (np.ndarray): The confidence score of each keypoint.\n img_path (string) : image path\n\n Returns:\n np.ndarray: The generated pseudo heatmap.\n \"\"\"\n\n # heatmap_image = np.zeros((img_h, img_w, 3), dtype=np.uint8)\n heatmaps = []\n\n if self.with_kp:\n num_kp = kps.shape[1]\n # print('MMMMMMMMMM num_kp', num_kp)\n # print('MMMMMMMMMM max_values', max_values)\n\n for i in range(num_kp):\n # print(\"MMMMMMMMmodel img_h, img_w :\", img_h, img_w)\n # print(\"MMMMMMMMM\", 'kps one_videos', kps)\n heatmap = self.generate_a_3d_heatmap(img_h, img_w, kps[:, i],\n sigma, max_values[:, i],\n img_path)[:, :, 0]\n # print(\"MMMMMMMmodel heatmap.shape:\", heatmap[:, :, 0].shape)\n # heatmaps = np.append(heatmaps, heatmap, axis=0)\n # print(heatmaps.shape)\n heatmaps.append(heatmap)\n\n return np.stack(heatmaps, axis=-1)\n\n def 
gen_an_aug(self, results):\n \"\"\"Generate pseudo heatmaps for all frames.\n\n Args:\n results (dict): The dictionary that contains all info of a sample.\n\n Returns:\n list[np.ndarray]: The generated pseudo heatmaps.\n \"\"\"\n\n all_kps = results['keypoint']\n # print(\"MMMMMMMMMModel frame dir\",results['frame_dir'])\n kp_shape = all_kps.shape\n\n if 'keypoint_score' in results:\n all_kpscores = results['keypoint_score']\n # print('MMMMMMMM', 'all_kpscores', all_kpscores)\n # print('MMMMMMMM', 'all_kpscores', all_kpscores.shape)\n else:\n all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)\n\n img_h, img_w = results['img_shape']\n num_frame = kp_shape[1]\n\n imgs = []\n for i in range(num_frame):\n sigma = self.sigma\n kps = all_kps[:, i]\n kpscores = all_kpscores[:, i]\n\n max_values = np.ones(kpscores.shape, dtype=np.float32)\n if self.use_score:\n max_values = kpscores\n\n hmap = self.generate_heatmap(img_h, img_w, kps, sigma, max_values)\n imgs.append(hmap)\n\n return imgs\n\n def gen_an_3d_aug(self, results):\n \"\"\"Generate pseudo heatmaps for all frames.\n\n Args:\n results (dict): The dictionary that contains all info of a sample.\n\n Returns:\n list[np.ndarray]: The generated pseudo heatmaps.\n \"\"\"\n\n all_kps = results['keypoint']\n kp_shape = all_kps.shape\n\n if 'keypoint_score' in results:\n all_kpscores = results['keypoint_score']\n # print('MMMMMMMM', 'all_kpscores', all_kpscores)\n # print('MMMMMMMM', 'all_kpscores', all_kpscores.shape)\n else:\n all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)\n\n img_h, img_w = results['img_shape']\n num_frame = kp_shape[1]\n # print(\"MMMMMMMMMMmodel kp_shape\", kp_shape)\n # print(\"MMMMMMMMMMmodel num_frame\", num_frame)\n\n frame_dir = results['frame_dir']\n # print(\"MMMMMMMMModel frame_dir\", frame_dir)\n # print(\"MMMMMMMMModel label\", results['label'])\n\n imgs = []\n for i in range(num_frame):\n # for i in range(10):\n sigma = self.sigma\n kps = all_kps[:, i]\n kpscores = all_kpscores[:, i]\n\n max_values = np.ones(kpscores.shape, dtype=np.float32)\n if self.use_score:\n max_values = kpscores\n\n hmap = self.generate_3d_heatmap(img_h, img_w, kps, sigma, max_values, '/' + frame_dir + '/img_{:05d}.jpg'.format(i))\n imgs.append(hmap)\n\n return imgs\n\n def __call__(self, results, is_rgb=False):\n # if not is_rgb:\n if not self.double:\n results['imgs'] = np.stack(self.gen_an_aug(results))\n else:\n results_ = cp.deepcopy(results)\n flip = Flip(\n flip_ratio=1, left_kp=self.left_kp, right_kp=self.right_kp)\n results_ = flip(results_)\n results['imgs'] = np.concatenate(\n [self.gen_an_aug(results),\n self.gen_an_aug(results_)])\n # else:\n #if not self.double:\n # results['imgs'] = np.stack(self.gen_an_3d_aug(results))\n #else:\n # results_ = cp.deepcopy(results)\n # flip = Flip(\n # flip_ratio=1, left_kp=self.left_kp, right_kp=self.right_kp)\n # results_ = flip(results_)\n # results['imgs'] = np.concatenate(\n # [self.gen_an_3d_aug(results),\n # self.gen_an_3d_aug(results_)])\n\n return results\n\n def __repr__(self):\n repr_str = (f'{self.__class__.__name__}('\n f'sigma={self.sigma}, '\n f'use_score={self.use_score}, '\n f'with_kp={self.with_kp}, '\n f'with_limb={self.with_limb}, '\n f'skeletons={self.skeletons}, '\n f'double={self.double}, '\n f'left_kp={self.left_kp}, '\n f'right_kp={self.right_kp})')\n return repr_str\n"
] |
[
[
"numpy.squeeze",
"numpy.cumsum",
"numpy.concatenate",
"numpy.max",
"numpy.zeros_like",
"numpy.exp",
"numpy.random.randint",
"numpy.unique",
"numpy.arange",
"numpy.stack",
"numpy.diff",
"numpy.zeros",
"numpy.random.choice",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.random.seed",
"numpy.ones",
"scipy.stats.mode",
"numpy.mod"
]
] |
AW14Saurabh/pointnet
|
[
"25cd03432c749c327fa8df8040b1c1f1674cd99a"
] |
[
"models/transform_nets.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nfrom utils import tf_util\n\ndef input_transform_net(point_cloud, is_training, bn_decay=None, K=3):\n \"\"\" Input (XYZ) Transform Net, input is BxNx3 gray image\n Return:\n Transformation matrix of size 3xK \"\"\"\n batch_size = point_cloud.get_shape()[0].value\n num_point = point_cloud.get_shape()[1].value\n\n input_image = tf.expand_dims(point_cloud, -1)\n net = tf_util.conv2d(input_image, 64, [1,3],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='tconv1', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='tconv2', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='tconv3', bn_decay=bn_decay)\n net = tf_util.max_pool2d(net, [num_point,1],\n padding='VALID', scope='tmaxpool')\n\n net = tf.reshape(net, [batch_size, -1])\n net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,\n scope='tfc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,\n scope='tfc2', bn_decay=bn_decay)\n\n with tf.variable_scope('transform_XYZ') as sc:\n assert(K==3)\n weights = tf.get_variable('weights', [256, 3*K],\n initializer=tf.constant_initializer(0.0),\n dtype=tf.float32)\n biases = tf.get_variable('biases', [3*K],\n initializer=tf.constant_initializer(0.0),\n dtype=tf.float32)\n biases += tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)\n transform = tf.matmul(net, weights)\n transform = tf.nn.bias_add(transform, biases)\n\n transform = tf.reshape(transform, [batch_size, 3, K])\n return transform\n\n\ndef feature_transform_net(inputs, is_training, bn_decay=None, K=64):\n \"\"\" Feature Transform Net, input is BxNx1xK\n Return:\n Transformation matrix of size KxK \"\"\"\n batch_size = inputs.get_shape()[0].value\n num_point = inputs.get_shape()[1].value\n\n net = tf_util.conv2d(inputs, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='tconv1', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='tconv2', bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='tconv3', bn_decay=bn_decay)\n net = tf_util.max_pool2d(net, [num_point,1],\n padding='VALID', scope='tmaxpool')\n\n net = tf.reshape(net, [batch_size, -1])\n net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,\n scope='tfc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,\n scope='tfc2', bn_decay=bn_decay)\n\n with tf.variable_scope('transform_feat') as sc:\n weights = tf.get_variable('weights', [256, K*K],\n initializer=tf.constant_initializer(0.0),\n dtype=tf.float32)\n biases = tf.get_variable('biases', [K*K],\n initializer=tf.constant_initializer(0.0),\n dtype=tf.float32)\n biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)\n transform = tf.matmul(net, weights)\n transform = tf.nn.bias_add(transform, biases)\n\n transform = tf.reshape(transform, [batch_size, K, K])\n return transform\n"
] |
[
[
"tensorflow.nn.bias_add",
"tensorflow.matmul",
"tensorflow.constant",
"numpy.eye",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.constant_initializer",
"tensorflow.variable_scope"
]
] |
woodydrn/tensorflow
|
[
"19d1ebd28f7109d77cab83160a55341667ae5bb0"
] |
[
"tensorflow/python/framework/tensor_shape.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helper classes for tensor shape inference.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport operator\nimport six\n\nfrom tensorflow.core.framework import tensor_shape_pb2\nfrom tensorflow.python import tf2\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.util.tf_export import tf_export\n\n_TENSORSHAPE_V2_OVERRIDE = None\n\n_api_usage_gauge = monitoring.BoolGauge(\n \"/tensorflow/api/v2_tensorshape\",\n \"Whether tensor_shape.enable_v2_tensorshape() is called.\")\n\n\n@tf_export(v1=[\"enable_v2_tensorshape\"])\ndef enable_v2_tensorshape():\n \"\"\"In TensorFlow 2.0, iterating over a TensorShape instance returns values.\n\n This enables the new behavior.\n\n Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but\n it V2 it returns either an integer, or None.\n\n Examples:\n\n ```\n #######################\n # If you had this in V1:\n value = tensor_shape[i].value\n\n # Do this in V2 instead:\n value = tensor_shape[i]\n\n #######################\n # If you had this in V1:\n for dim in tensor_shape:\n value = dim.value\n print(value)\n\n # Do this in V2 instead:\n for value in tensor_shape:\n print(value)\n\n #######################\n # If you had this in V1:\n dim = tensor_shape[i]\n dim.assert_is_compatible_with(other_shape) # or using any other shape method\n\n # Do this in V2 instead:\n if tensor_shape.rank is None:\n dim = Dimension(None)\n else:\n dim = tensor_shape.dims[i]\n dim.assert_is_compatible_with(other_shape) # or using any other shape method\n\n # The V2 suggestion above is more explicit, which will save you from\n # the following trap (present in V1):\n # you might do in-place modifications to `dim` and expect them to be reflected\n # in `tensor_shape[i]`, but they would not be.\n ```\n \"\"\"\n global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name\n _TENSORSHAPE_V2_OVERRIDE = True\n _api_usage_gauge.get_cell().set(True)\n\n\n@tf_export(v1=[\"disable_v2_tensorshape\"])\ndef disable_v2_tensorshape():\n \"\"\"Disables the V2 TensorShape behavior and reverts to V1 behavior.\n\n See docstring for `enable_v2_tensorshape` for details about the new behavior.\n \"\"\"\n global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name\n _TENSORSHAPE_V2_OVERRIDE = False\n _api_usage_gauge.get_cell().set(False)\n\n\n@tf_export(\n \"compat.dimension_value\", v1=[\"dimension_value\", \"compat.dimension_value\"])\ndef dimension_value(dimension):\n \"\"\"Compatibility utility required to allow for both V1 and V2 behavior in TF.\n\n Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to\n coexist with the new behavior. 
This utility is a bridge between the two.\n\n When accessing the value of a TensorShape dimension,\n use this utility, like this:\n\n ```\n # If you had this in your V1 code:\n value = tensor_shape[i].value\n\n # Use `dimension_value` as direct replacement compatible with both V1 & V2:\n value = dimension_value(tensor_shape[i])\n\n # This would be the V2 equivalent:\n value = tensor_shape[i] # Warning: this will return the dim value in V2!\n ```\n\n Args:\n dimension: Either a `Dimension` instance, an integer, or None.\n\n Returns:\n A plain value, i.e. an integer or None.\n \"\"\"\n if isinstance(dimension, Dimension):\n return dimension.value\n return dimension\n\n\n@tf_export(\n \"compat.dimension_at_index\",\n v1=[\"dimension_at_index\", \"compat.dimension_at_index\"])\ndef dimension_at_index(shape, index):\n \"\"\"Compatibility utility required to allow for both V1 and V2 behavior in TF.\n\n Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to\n coexist with the new behavior. This utility is a bridge between the two.\n\n If you want to retrieve the Dimension instance corresponding to a certain\n index in a TensorShape instance, use this utility, like this:\n\n ```\n # If you had this in your V1 code:\n dim = tensor_shape[i]\n\n # Use `dimension_at_index` as direct replacement compatible with both V1 & V2:\n dim = dimension_at_index(tensor_shape, i)\n\n # Another possibility would be this, but WARNING: it only works if the\n # tensor_shape instance has a defined rank.\n dim = tensor_shape.dims[i] # `dims` may be None if the rank is undefined!\n\n # In native V2 code, we recommend instead being more explicit:\n if tensor_shape.rank is None:\n dim = Dimension(None)\n else:\n dim = tensor_shape.dims[i]\n\n # Being more explicit will save you from the following trap (present in V1):\n # you might do in-place modifications to `dim` and expect them to be reflected\n # in `tensor_shape[i]`, but they would not be (as the Dimension object was\n # instantiated on the fly.\n ```\n\n Args:\n shape: A TensorShape instance.\n index: An integer index.\n\n Returns:\n A dimension object.\n \"\"\"\n assert isinstance(shape, TensorShape)\n if shape.rank is None:\n return Dimension(None)\n else:\n return shape.dims[index]\n\n\n@tf_export(v1=[\"Dimension\"])\nclass Dimension(object):\n \"\"\"Represents the value of one dimension in a TensorShape.\"\"\"\n\n __slots__ = [\"_value\"]\n\n def __init__(self, value):\n \"\"\"Creates a new Dimension with the given value.\"\"\"\n if isinstance(value, int): # Most common case.\n if value < 0:\n raise ValueError(\"Dimension %d must be >= 0\" % value)\n self._value = value\n elif value is None:\n self._value = None\n elif isinstance(value, Dimension):\n self._value = value._value\n else:\n try:\n # int(...) 
compensates for the int/long dichotomy on Python 2.X.\n # TODO(b/143206389): Remove once we fully migrate to 3.X.\n self._value = int(value.__index__())\n except AttributeError:\n six.raise_from(\n TypeError(\"Dimension value must be integer or None or have \"\n \"an __index__ method, got value '{0!r}' with type '{1!r}'\"\n .format(value, type(value))), None)\n if self._value < 0:\n raise ValueError(\"Dimension %d must be >= 0\" % self._value)\n\n def __repr__(self):\n return \"Dimension(%s)\" % repr(self._value)\n\n def __str__(self):\n value = self._value\n return \"?\" if value is None else str(value)\n\n def __eq__(self, other):\n \"\"\"Returns true if `other` has the same known value as this Dimension.\"\"\"\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n if self._value is None or other.value is None:\n return None\n return self._value == other.value\n\n def __ne__(self, other):\n \"\"\"Returns true if `other` has a different known value from `self`.\"\"\"\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n if self._value is None or other.value is None:\n return None\n return self._value != other.value\n\n def __bool__(self):\n \"\"\"Equivalent to `bool(self.value)`.\"\"\"\n return bool(self._value)\n\n def __int__(self):\n return self._value\n\n # This is needed for Windows.\n # See https://github.com/tensorflow/tensorflow/pull/9780\n def __long__(self):\n return self._value\n\n def __index__(self):\n # Allow use in Python 3 range\n return self._value\n\n def __hash__(self):\n if self._value is None:\n raise ValueError(\"Unable to hash Dimension with value 'None'\")\n return hash(self._value)\n\n @property\n def value(self):\n \"\"\"The value of this dimension, or None if it is unknown.\"\"\"\n return self._value\n\n def is_compatible_with(self, other):\n \"\"\"Returns true if `other` is compatible with this Dimension.\n\n Two known Dimensions are compatible if they have the same value.\n An unknown Dimension is compatible with all other Dimensions.\n\n Args:\n other: Another Dimension.\n\n Returns:\n True if this Dimension and `other` are compatible.\n \"\"\"\n other = as_dimension(other)\n return (self._value is None or other.value is None or\n self._value == other.value)\n\n def assert_is_compatible_with(self, other):\n \"\"\"Raises an exception if `other` is not compatible with this Dimension.\n\n Args:\n other: Another Dimension.\n\n Raises:\n ValueError: If `self` and `other` are not compatible (see\n is_compatible_with).\n \"\"\"\n if not self.is_compatible_with(other):\n raise ValueError(\"Dimensions %s and %s are not compatible\" %\n (self, other))\n\n def merge_with(self, other):\n \"\"\"Returns a Dimension that combines the information in `self` and `other`.\n\n Dimensions are combined as follows:\n\n ```python\n tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(n)) ==\n tf.compat.v1.Dimension(n)\n tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(None)) ==\n tf.compat.v1.Dimension(n)\n tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(n)) ==\n tf.compat.v1.Dimension(n)\n # equivalent to tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(None))\n\n # raises ValueError for n != m\n tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(m))\n ```\n\n Args:\n other: Another Dimension.\n\n Returns:\n A Dimension containing the combined information of `self` and\n `other`.\n\n Raises:\n ValueError: If 
`self` and `other` are not compatible (see\n is_compatible_with).\n \"\"\"\n other = as_dimension(other)\n self.assert_is_compatible_with(other)\n if self._value is None:\n return Dimension(other.value)\n else:\n return Dimension(self._value)\n\n def __add__(self, other):\n \"\"\"Returns the sum of `self` and `other`.\n\n Dimensions are summed as follows:\n\n ```python\n tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(n) ==\n tf.compat.v1.Dimension(m + n)\n tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n ```\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is the sum of `self` and `other`.\n \"\"\"\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value + other.value)\n\n def __radd__(self, other):\n \"\"\"Returns the sum of `other` and `self`.\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is the sum of `self` and `other`.\n \"\"\"\n return self + other\n\n def __sub__(self, other):\n \"\"\"Returns the subtraction of `other` from `self`.\n\n Dimensions are subtracted as follows:\n\n ```python\n tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(n) ==\n tf.compat.v1.Dimension(m - n)\n tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n ```\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is the subtraction of `other` from `self`.\n \"\"\"\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value - other.value)\n\n def __rsub__(self, other):\n \"\"\"Returns the subtraction of `self` from `other`.\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is the subtraction of `self` from `other`.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(other.value - self._value)\n\n def __mul__(self, other):\n \"\"\"Returns the product of `self` and `other`.\n\n Dimensions are summed as follows:\n\n ```python\n tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(n) ==\n tf.compat.v1.Dimension(m * n)\n tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None) # equiv. 
to\n tf.compat.v1.Dimension(None)\n ```\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is the product of `self` and `other`.\n \"\"\"\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value * other.value)\n\n def __rmul__(self, other):\n \"\"\"Returns the product of `self` and `other`.\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is the product of `self` and `other`.\n \"\"\"\n return self * other\n\n def __floordiv__(self, other):\n \"\"\"Returns the quotient of `self` and `other` rounded down.\n\n Dimensions are divided as follows:\n\n ```python\n tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(n) ==\n tf.compat.v1.Dimension(m // n)\n tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(n) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n ```\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A `Dimension` whose value is the integer quotient of `self` and `other`.\n \"\"\"\n try:\n other = as_dimension(other)\n except (TypeError, ValueError):\n return NotImplemented\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value // other.value)\n\n def __rfloordiv__(self, other):\n \"\"\"Returns the quotient of `other` and `self` rounded down.\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A `Dimension` whose value is the integer quotient of `self` and `other`.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(other.value // self._value)\n\n def __div__(self, other):\n \"\"\"DEPRECATED: Use `__floordiv__` via `x // y` instead.\n\n This function exists only for backwards compatibility purposes; new code\n should use `__floordiv__` via the syntax `x // y`. Using `x // y`\n communicates clearly that the result rounds down, and is forward compatible\n to Python 3.\n\n Args:\n other: Another `Dimension`.\n\n Returns:\n A `Dimension` whose value is the integer quotient of `self` and `other`.\n \"\"\"\n return self // other\n\n def __rdiv__(self, other):\n \"\"\"Use `__floordiv__` via `x // y` instead.\n\n This function exists only to have a better error message. Instead of:\n `TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,\n this function will explicitly call for usage of `//` instead.\n\n Args:\n other: Another `Dimension`.\n\n Raises:\n TypeError.\n \"\"\"\n raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', \"\n \"please use // instead\".format(type(other).__name__))\n\n def __truediv__(self, other):\n \"\"\"Use `__floordiv__` via `x // y` instead.\n\n This function exists only to have a better error message. 
Instead of:\n `TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`,\n this function will explicitly call for usage of `//` instead.\n\n Args:\n other: Another `Dimension`.\n\n Raises:\n TypeError.\n \"\"\"\n raise TypeError(\"unsupported operand type(s) for /: 'Dimension' and '{}', \"\n \"please use // instead\".format(type(other).__name__))\n\n def __rtruediv__(self, other):\n \"\"\"Use `__floordiv__` via `x // y` instead.\n\n This function exists only to have a better error message. Instead of:\n `TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,\n this function will explicitly call for usage of `//` instead.\n\n Args:\n other: Another `Dimension`.\n\n Raises:\n TypeError.\n \"\"\"\n raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', \"\n \"please use // instead\".format(type(other).__name__))\n\n def __mod__(self, other):\n \"\"\"Returns `self` modulo `other`.\n\n Dimension modulo are computed as follows:\n\n ```python\n tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(n) ==\n tf.compat.v1.Dimension(m % n)\n tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n) # equiv. to\n tf.compat.v1.Dimension(None)\n tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None) # equiv. to\n tf.compat.v1.Dimension(None)\n ```\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is `self` modulo `other`.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value % other.value)\n\n def __rmod__(self, other):\n \"\"\"Returns `other` modulo `self`.\n\n Args:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\n Returns:\n A Dimension whose value is `other` modulo `self`.\n \"\"\"\n other = as_dimension(other)\n return other % self\n\n def __lt__(self, other):\n \"\"\"Returns True if `self` is known to be less than `other`.\n\n Dimensions are compared as follows:\n\n ```python\n (tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(n)) == (m < n)\n (tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(None)) == None\n (tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n)) == None\n (tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None\n ```\n\n Args:\n other: Another Dimension.\n\n Returns:\n The value of `self.value < other.value` if both are known, otherwise\n None.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return None\n else:\n return self._value < other.value\n\n def __le__(self, other):\n \"\"\"Returns True if `self` is known to be less than or equal to `other`.\n\n Dimensions are compared as follows:\n\n ```python\n (tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(n)) == (m <= n)\n (tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(None)) == None\n (tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n)) == None\n (tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None\n ```\n\n Args:\n other: Another Dimension.\n\n Returns:\n The value of `self.value <= other.value` if both are known, otherwise\n None.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return None\n else:\n return self._value <= other.value\n\n def __gt__(self, other):\n \"\"\"Returns True if `self` is known to be greater than `other`.\n\n Dimensions are compared as 
follows:\n\n ```python\n (tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(n)) == (m > n)\n (tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(None)) == None\n (tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n)) == None\n (tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None\n ```\n\n Args:\n other: Another Dimension.\n\n Returns:\n The value of `self.value > other.value` if both are known, otherwise\n None.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return None\n else:\n return self._value > other.value\n\n def __ge__(self, other):\n \"\"\"Returns True if `self` is known to be greater than or equal to `other`.\n\n Dimensions are compared as follows:\n\n ```python\n (tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(n)) == (m >= n)\n (tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(None)) == None\n (tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n)) == None\n (tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None\n ```\n\n Args:\n other: Another Dimension.\n\n Returns:\n The value of `self.value >= other.value` if both are known, otherwise\n None.\n \"\"\"\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return None\n else:\n return self._value >= other.value\n\n def __reduce__(self):\n return Dimension, (self._value,)\n\n\ndef as_dimension(value):\n \"\"\"Converts the given value to a Dimension.\n\n A Dimension input will be returned unmodified.\n An input of `None` will be converted to an unknown Dimension.\n An integer input will be converted to a Dimension with that value.\n\n Args:\n value: The value to be converted.\n\n Returns:\n A Dimension corresponding to the given value.\n \"\"\"\n if isinstance(value, Dimension):\n return value\n else:\n return Dimension(value)\n\n\n@tf_export(\"TensorShape\")\nclass TensorShape(object):\n \"\"\"Represents the shape of a `Tensor`.\n\n A `TensorShape` represents a possibly-partial shape specification for a\n `Tensor`. It may be one of the following:\n\n * *Fully-known shape:* has a known number of dimensions and a known size\n for each dimension. e.g. `TensorShape([16, 256])`\n * *Partially-known shape:* has a known number of dimensions, and an unknown\n size for one or more dimension. e.g. `TensorShape([None, 256])`\n * *Unknown shape:* has an unknown number of dimensions, and an unknown\n size in all dimensions. e.g. `TensorShape(None)`\n\n If a tensor is produced by an operation of type `\"Foo\"`, its shape\n may be inferred if there is a registered shape function for\n `\"Foo\"`. See [Shape\n functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)\n for details of shape functions and how to register them. 
Alternatively,\n the shape may be set explicitly using `tf.Tensor.set_shape`.\n \"\"\"\n __slots__ = [\"_dims\"]\n\n def __init__(self, dims):\n \"\"\"Creates a new TensorShape with the given dimensions.\n\n Args:\n dims: A list of Dimensions, or None if the shape is unspecified.\n\n Raises:\n TypeError: If dims cannot be converted to a list of dimensions.\n \"\"\"\n if isinstance(dims, (tuple, list)): # Most common case.\n self._dims = [Dimension(d) for d in dims]\n elif dims is None:\n self._dims = None\n elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):\n if dims.unknown_rank:\n self._dims = None\n else:\n self._dims = [\n # Protos store variable-size dimensions as -1\n as_dimension(dim.size if dim.size != -1 else None)\n for dim in dims.dim\n ]\n elif isinstance(dims, TensorShape):\n self._dims = dims.dims\n else:\n try:\n dims_iter = iter(dims)\n except TypeError:\n # Treat as a singleton dimension\n self._dims = [as_dimension(dims)]\n else:\n self._dims = []\n for d in dims_iter:\n try:\n self._dims.append(as_dimension(d))\n except TypeError as e:\n six.raise_from(\n TypeError(\n \"Failed to convert '{0!r}' to a shape: '{1!r}'\"\n \"could not be converted to a dimension. A shape should \"\n \"either be single dimension (e.g. 10), or an iterable of \"\n \"dimensions (e.g. [1, 10, None]).\"\n .format(dims, d)), e)\n\n @property\n def _v2_behavior(self):\n if _TENSORSHAPE_V2_OVERRIDE is None:\n return tf2.enabled()\n return _TENSORSHAPE_V2_OVERRIDE\n\n def __repr__(self):\n if self._v2_behavior:\n if self._dims is not None:\n return \"TensorShape(%r)\" % [dim.value for dim in self._dims]\n else:\n return \"TensorShape(None)\"\n else:\n return \"TensorShape(%r)\" % self._dims\n\n def __str__(self):\n if self.rank is None:\n return \"<unknown>\"\n elif self.rank == 1:\n if self._v2_behavior:\n return \"(%s,)\" % self._dims[0].value\n else:\n return \"(%s,)\" % self._dims[0]\n else:\n if self._v2_behavior:\n return \"(%s)\" % \", \".join(str(d.value) for d in self._dims)\n else:\n return \"(%s)\" % \", \".join(str(d) for d in self._dims)\n\n @property\n def rank(self):\n \"\"\"Returns the rank of this shape, or None if it is unspecified.\"\"\"\n if self._dims is not None:\n return len(self._dims)\n return None\n\n @property\n def dims(self):\n \"\"\"Deprecated. 
Returns list of dimensions for this shape.\n\n Suggest `TensorShape.as_list` instead.\n\n Returns:\n A list containing `tf.compat.v1.Dimension`s, or None if the shape is\n unspecified.\n \"\"\"\n return self._dims\n\n @property\n def ndims(self):\n \"\"\"Deprecated accessor for `rank`.\"\"\"\n return self.rank\n\n def __len__(self):\n \"\"\"Returns the rank of this shape, or raises ValueError if unspecified.\"\"\"\n if self._dims is None:\n raise ValueError(\"Cannot take the length of shape with unknown rank.\")\n return len(self._dims)\n\n def __bool__(self):\n \"\"\"Returns True if this shape contains non-zero information.\"\"\"\n return self._dims is not None\n\n # Python 3 wants __bool__, Python 2.7 wants __nonzero__\n __nonzero__ = __bool__\n\n def __iter__(self):\n \"\"\"Returns `self.dims` if the rank is known, otherwise raises ValueError.\"\"\"\n if self._dims is None:\n raise ValueError(\"Cannot iterate over a shape with unknown rank.\")\n else:\n if self._v2_behavior:\n return iter(d.value for d in self._dims)\n else:\n return iter(d for d in self._dims)\n\n def __getitem__(self, key):\n \"\"\"Returns the value of a dimension or a shape, depending on the key.\n\n Args:\n key: If `key` is an integer, returns the dimension at that index;\n otherwise if `key` is a slice, returns a TensorShape whose dimensions\n are those selected by the slice from `self`.\n\n Returns:\n An integer if `key` is an integer, or a `TensorShape` if `key` is a\n slice.\n\n Raises:\n ValueError: If `key` is a slice and `self` is completely unknown and\n the step is set.\n \"\"\"\n if self._dims is not None:\n if isinstance(key, slice):\n return TensorShape(self._dims[key])\n else:\n if self._v2_behavior:\n return self._dims[key].value\n else:\n return self._dims[key]\n else:\n if isinstance(key, slice):\n start = key.start if key.start is not None else 0\n stop = key.stop\n\n if key.step is not None:\n # TODO(mrry): Handle these maybe.\n raise ValueError(\"Steps are not yet handled\")\n if stop is None:\n # NOTE(mrry): This implies that TensorShape(None) is compatible with\n # TensorShape(None)[1:], which is obviously not true. 
It would be\n # possible to track the number of dimensions symbolically,\n # and perhaps we should do that.\n return unknown_shape()\n elif start < 0 or stop < 0:\n # TODO(mrry): Handle this better, as it will be useful for handling\n # suffixes of otherwise unknown shapes.\n return unknown_shape()\n else:\n return unknown_shape(rank=stop - start)\n else:\n if self._v2_behavior:\n return None\n else:\n return Dimension(None)\n\n def num_elements(self):\n \"\"\"Returns the total number of elements, or none for incomplete shapes.\"\"\"\n if self.is_fully_defined():\n return functools.reduce(operator.mul, self.as_list(), 1)\n else:\n return None\n\n def merge_with(self, other):\n \"\"\"Returns a `TensorShape` combining the information in `self` and `other`.\n\n The dimensions in `self` and `other` are merged element-wise,\n according to the rules below:\n\n ```python\n Dimension(n).merge_with(Dimension(None)) == Dimension(n)\n Dimension(None).merge_with(Dimension(n)) == Dimension(n)\n Dimension(None).merge_with(Dimension(None)) == Dimension(None)\n # raises ValueError for n != m\n Dimension(n).merge_with(Dimension(m))\n ```\n >> ts = tf.TensorShape([1,2])\n >> ot1 = tf.TensorShape([1,2])\n >> ts.merge_with(ot).as_list()\n [1,2]\n\n >> ot2 = tf.TensorShape([1,None])\n >> ts.merge_with(ot2).as_list()\n [1,2]\n\n >> ot3 = tf.TensorShape([None, None])\n >> ot3.merge_with(ot2).as_list()\n [1, None]\n\n Args:\n other: Another `TensorShape`.\n\n Returns:\n A `TensorShape` containing the combined information of `self` and\n `other`.\n\n Raises:\n ValueError: If `self` and `other` are not compatible.\n \"\"\"\n other = as_shape(other)\n if self._dims is None:\n return other\n if other.dims is None:\n return self\n else:\n try:\n self.assert_same_rank(other)\n new_dims = [\n dim.merge_with(other_dim)\n for dim, other_dim in zip(self._dims, other.dims)\n ]\n return TensorShape(new_dims)\n except ValueError:\n raise ValueError(\"Shapes %s and %s are not compatible\" % (self, other))\n\n def __add__(self, other):\n return self.concatenate(other)\n\n def __radd__(self, other):\n if not isinstance(other, TensorShape):\n other = TensorShape(other)\n return other.concatenate(self)\n\n def __hash__(self):\n if not self.is_fully_defined():\n raise ValueError(\"Unable to hash partially defined TensorShape.\")\n return hash(tuple(self._dims))\n\n def concatenate(self, other):\n \"\"\"Returns the concatenation of the dimension in `self` and `other`.\n\n *N.B.* If either `self` or `other` is completely unknown,\n concatenation will discard information about the other shape. 
In\n future, we might support concatenation that preserves this\n information for use with slicing.\n\n Args:\n other: Another `TensorShape`.\n\n Returns:\n A `TensorShape` whose dimensions are the concatenation of the\n dimensions in `self` and `other`.\n \"\"\"\n # TODO(mrry): Handle the case where we concatenate a known shape with a\n # completely unknown shape, so that we can use the partial information.\n other = as_shape(other)\n if self._dims is None or other.dims is None:\n return unknown_shape()\n else:\n return TensorShape(self._dims + other.dims)\n\n def assert_same_rank(self, other):\n \"\"\"Raises an exception if `self` and `other` do not have compatible ranks.\n\n Args:\n other: Another `TensorShape`.\n\n Raises:\n ValueError: If `self` and `other` do not represent shapes with the\n same rank.\n \"\"\"\n other = as_shape(other)\n if self.rank is not None and other.rank is not None:\n if self.rank != other.rank:\n raise ValueError(\"Shapes %s and %s must have the same rank\" %\n (self, other))\n\n def assert_has_rank(self, rank):\n \"\"\"Raises an exception if `self` is not compatible with the given `rank`.\n\n Args:\n rank: An integer.\n\n Raises:\n ValueError: If `self` does not represent a shape with the given `rank`.\n \"\"\"\n if self.rank not in (None, rank):\n raise ValueError(\"Shape %s must have rank %d\" % (self, rank))\n\n def with_rank(self, rank):\n \"\"\"Returns a shape based on `self` with the given rank.\n\n This method promotes a completely unknown shape to one with a\n known rank.\n\n Args:\n rank: An integer.\n\n Returns:\n A shape that is at least as specific as `self` with the given rank.\n\n Raises:\n ValueError: If `self` does not represent a shape with the given `rank`.\n \"\"\"\n try:\n return self.merge_with(unknown_shape(rank=rank))\n except ValueError:\n raise ValueError(\"Shape %s must have rank %d\" % (self, rank))\n\n def with_rank_at_least(self, rank):\n \"\"\"Returns a shape based on `self` with at least the given rank.\n\n Args:\n rank: An integer.\n\n Returns:\n A shape that is at least as specific as `self` with at least the given\n rank.\n\n Raises:\n ValueError: If `self` does not represent a shape with at least the given\n `rank`.\n \"\"\"\n if self.rank is not None and self.rank < rank:\n raise ValueError(\"Shape %s must have rank at least %d\" % (self, rank))\n else:\n return self\n\n def with_rank_at_most(self, rank):\n \"\"\"Returns a shape based on `self` with at most the given rank.\n\n Args:\n rank: An integer.\n\n Returns:\n A shape that is at least as specific as `self` with at most the given\n rank.\n\n Raises:\n ValueError: If `self` does not represent a shape with at most the given\n `rank`.\n \"\"\"\n if self.rank is not None and self.rank > rank:\n raise ValueError(\"Shape %s must have rank at most %d\" % (self, rank))\n else:\n return self\n\n def is_compatible_with(self, other):\n \"\"\"Returns True iff `self` is compatible with `other`.\n\n Two possibly-partially-defined shapes are compatible if there\n exists a fully-defined shape that both shapes can represent. Thus,\n compatibility allows the shape inference code to reason about\n partially-defined shapes. For example:\n\n * TensorShape(None) is compatible with all shapes.\n\n * TensorShape([None, None]) is compatible with all two-dimensional\n shapes, such as TensorShape([32, 784]), and also TensorShape(None). 
It is\n not compatible with, for example, TensorShape([None]) or\n TensorShape([None, None, None]).\n\n * TensorShape([32, None]) is compatible with all two-dimensional shapes\n with size 32 in the 0th dimension, and also TensorShape([None, None])\n and TensorShape(None). It is not compatible with, for example,\n TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).\n\n * TensorShape([32, 784]) is compatible with itself, and also\n TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,\n None]) and TensorShape(None). It is not compatible with, for example,\n TensorShape([32, 1, 784]) or TensorShape([None]).\n\n The compatibility relation is reflexive and symmetric, but not\n transitive. For example, TensorShape([32, 784]) is compatible with\n TensorShape(None), and TensorShape(None) is compatible with\n TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with\n TensorShape([4, 4]).\n\n Args:\n other: Another TensorShape.\n\n Returns:\n True iff `self` is compatible with `other`.\n\n \"\"\"\n other = as_shape(other)\n if self._dims is not None and other.dims is not None:\n if self.rank != other.rank:\n return False\n for x_dim, y_dim in zip(self._dims, other.dims):\n if not x_dim.is_compatible_with(y_dim):\n return False\n return True\n\n def assert_is_compatible_with(self, other):\n \"\"\"Raises exception if `self` and `other` do not represent the same shape.\n\n This method can be used to assert that there exists a shape that both\n `self` and `other` represent.\n\n Args:\n other: Another TensorShape.\n\n Raises:\n ValueError: If `self` and `other` do not represent the same shape.\n \"\"\"\n if not self.is_compatible_with(other):\n raise ValueError(\"Shapes %s and %s are incompatible\" % (self, other))\n\n def most_specific_compatible_shape(self, other):\n \"\"\"Returns the most specific TensorShape compatible with `self` and `other`.\n\n * TensorShape([None, 1]) is the most specific TensorShape compatible with\n both TensorShape([2, 1]) and TensorShape([5, 1]). Note that\n TensorShape(None) is also compatible with above mentioned TensorShapes.\n\n * TensorShape([1, 2, 3]) is the most specific TensorShape compatible with\n both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more\n less specific TensorShapes compatible with above mentioned TensorShapes,\n e.g. 
TensorShape([1, 2, None]), TensorShape(None).\n\n Args:\n other: Another `TensorShape`.\n\n Returns:\n A `TensorShape` which is the most specific compatible shape of `self`\n and `other`.\n \"\"\"\n\n other = as_shape(other)\n if self._dims is None or other.dims is None or self.rank != other.rank:\n return unknown_shape()\n\n dims = [\n d1 if d1 is not None and d2 is not None and d1 == d2 else None\n for d1, d2 in zip(self._dims, other.dims)\n ]\n return TensorShape(dims)\n\n def is_fully_defined(self):\n \"\"\"Returns True iff `self` is fully defined in every dimension.\"\"\"\n return (self._dims is not None and\n all(dim.value is not None for dim in self._dims))\n\n def assert_is_fully_defined(self):\n \"\"\"Raises an exception if `self` is not fully defined in every dimension.\n\n Raises:\n ValueError: If `self` does not have a known value for every dimension.\n \"\"\"\n if not self.is_fully_defined():\n raise ValueError(\"Shape %s is not fully defined\" % self)\n\n def as_list(self):\n \"\"\"Returns a list of integers or `None` for each dimension.\n\n Returns:\n A list of integers or `None` for each dimension.\n\n Raises:\n ValueError: If `self` is an unknown shape with an unknown rank.\n \"\"\"\n if self._dims is None:\n raise ValueError(\"as_list() is not defined on an unknown TensorShape.\")\n return [dim.value for dim in self._dims]\n\n def as_proto(self):\n \"\"\"Returns this shape as a `TensorShapeProto`.\"\"\"\n if self._dims is None:\n return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)\n else:\n return tensor_shape_pb2.TensorShapeProto(dim=[\n tensor_shape_pb2.TensorShapeProto.Dim(\n size=-1 if d.value is None else d.value) for d in self._dims\n ])\n\n def __eq__(self, other):\n \"\"\"Returns True if `self` is equivalent to `other`.\n\n It first tries to convert `other` to `TensorShape`. `TypeError` is thrown\n when the conversion fails. 
Otherwise, it compares each element in the\n TensorShape dimensions.\n\n * Two *Fully known* shapes, return True iff each element is equal.\n >>> t_a = tf.TensorShape([1,2])\n >>> a = [1, 2]\n >>> t_b = tf.TensorShape([1,2])\n >>> t_c = tf.TensorShape([1,2,3])\n >>> t_a.__eq__(a)\n True\n >>> t_a.__eq__(t_b)\n True\n >>> t_a.__eq__(t_c)\n False\n\n * Two *Partially-known* shapes, return False.\n >>> p_a = tf.TensorShape([1,None])\n >>> p_b = tf.TensorShape([2,None])\n >>> p_a.__eq__(p_b)\n False\n >>> t_a.__eq__(p_a)\n False\n\n * Two *Unknown shape*, return True.\n >>> unk_a = tf.TensorShape(None)\n >>> unk_b = tf.TensorShape(None)\n >>> unk_a.__eq__(unk_b)\n True\n >>> unk_a.__eq__(t_a)\n False\n\n Args:\n other: A `TensorShape` or type that can be converted to `TensorShape`.\n\n Returns:\n True if the dimensions are all equal.\n\n Raises:\n TypeError if `other` can not be converted to `TensorShape`.\n \"\"\"\n\n try:\n other = as_shape(other)\n except TypeError:\n return NotImplemented\n return self._dims == other.dims\n\n def __ne__(self, other):\n \"\"\"Returns True if `self` is known to be different from `other`.\"\"\"\n try:\n other = as_shape(other)\n except TypeError:\n return NotImplemented\n if self.rank is None or other.rank is None:\n raise ValueError(\"The inequality of unknown TensorShapes is undefined.\")\n if self.rank != other.rank:\n return True\n return self._dims != other.dims\n\n def __reduce__(self):\n return TensorShape, (self._dims,)\n\n def __concat__(self, other):\n return self.concatenate(other)\n\n\ndef as_shape(shape):\n \"\"\"Converts the given object to a TensorShape.\"\"\"\n if isinstance(shape, TensorShape):\n return shape\n else:\n return TensorShape(shape)\n\n\ndef unknown_shape(rank=None, **kwargs):\n \"\"\"Returns an unknown TensorShape, optionally with a known rank.\n\n Args:\n rank: (Optional) If specified, the number of dimensions in the shape.\n **kwargs: For backwards compatibility.\n\n Returns:\n An unknown TensorShape.\n\n Raises:\n TypeError: In case of invalid arguments.\n \"\"\"\n if rank is None and \"ndims\" in kwargs:\n rank = kwargs.pop(\"ndims\")\n if kwargs:\n raise TypeError(\"Unknown argument: %s\" % kwargs)\n if rank is None:\n return TensorShape(None)\n else:\n return TensorShape([Dimension(None)] * rank)\n"
] |
[
[
"tensorflow.python.eager.monitoring.BoolGauge",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto",
"tensorflow.python.tf2.enabled",
"tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto.Dim"
]
] |
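The row above stores TensorFlow's `tensor_shape.py` source together with the APIs it touches. As a quick illustration of the shape semantics documented in that file's docstrings, the minimal sketch below (not part of the dataset row; it assumes a working TensorFlow installation imported as `tf`) exercises `merge_with` and `is_compatible_with`:

```python
# Minimal sketch (assumes TensorFlow is installed) exercising the
# TensorShape semantics documented in the record above: merging,
# compatibility checks, and conversion to a plain Python list.
import tensorflow as tf

partial = tf.TensorShape([None, 256])   # partially-known shape
full = tf.TensorShape([32, 256])        # fully-known shape

# merge_with combines information element-wise; an unknown dimension
# adopts the known value from the other shape.
merged = partial.merge_with(full)
print(merged.as_list())                 # [32, 256]

# is_compatible_with is reflexive and symmetric, but not transitive.
print(partial.is_compatible_with(full))                     # True
print(full.is_compatible_with(tf.TensorShape([64, 256])))   # False

# A shape of unknown rank is compatible with every shape.
print(tf.TensorShape(None).is_compatible_with(full))        # True
```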
frenebo/ZMOD
|
[
"58159fcbf61200c1ec2d6b92fca0cd9d4e83a208"
] |
[
"zmk/trainModel/mrcnn/utils.py"
] |
[
"\"\"\"\nMask R-CNN\nCommon utility functions and classes.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport sys\nimport os\nimport math\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport scipy\nimport skimage.color\nimport skimage.io\nimport skimage.transform\nimport urllib.request\nimport shutil\nimport warnings\nfrom distutils.version import LooseVersion\n\n# URL from which to download the latest COCO trained weights\nCOCO_MODEL_URL = \"https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5\"\n\n\n############################################################\n# Bounding Boxes\n############################################################\n\ndef extract_bboxes(mask):\n \"\"\"Compute bounding boxes from masks.\n mask: [height, width, num_instances]. Mask pixels are either 1 or 0.\n\n Returns: bbox array [num_instances, (y1, x1, y2, x2)].\n \"\"\"\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([y1, x1, y2, x2])\n return boxes.astype(np.int32)\n\n\ndef compute_iou(box, boxes, box_area, boxes_area):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2]\n boxes: [boxes_count, (y1, x1, y2, x2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n\n Note: the areas are passed in rather than calculated here for\n efficiency. 
Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = np.maximum(box[0], boxes[:, 0])\n y2 = np.minimum(box[2], boxes[:, 2])\n x1 = np.maximum(box[1], boxes[:, 1])\n x2 = np.minimum(box[3], boxes[:, 3])\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n return iou\n\n\ndef compute_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n\n For better performance, pass the largest set first and the smaller second.\n \"\"\"\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps\n\n\ndef compute_overlaps_masks(masks1, masks2):\n \"\"\"Computes IoU overlaps between two sets of masks.\n masks1, masks2: [Height, Width, instances]\n \"\"\"\n \n # If either set of masks is empty return empty result\n if masks1.shape[-1] == 0 or masks2.shape[-1] == 0:\n return np.zeros((masks1.shape[-1], masks2.shape[-1]))\n # flatten masks and compute their areas\n masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)\n masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)\n area1 = np.sum(masks1, axis=0)\n area2 = np.sum(masks2, axis=0)\n\n # intersections and union\n intersections = np.dot(masks1.T, masks2)\n union = area1[:, None] + area2[None, :] - intersections\n overlaps = intersections / union\n\n return overlaps\n\n\ndef non_max_suppression(boxes, scores, threshold):\n \"\"\"Performs non-maximum suppression and returns indices of kept boxes.\n boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.\n scores: 1-D array of box scores.\n threshold: Float. IoU threshold to use for filtering.\n \"\"\"\n assert boxes.shape[0] > 0\n if boxes.dtype.kind != \"f\":\n boxes = boxes.astype(np.float32)\n\n # Compute box areas\n y1 = boxes[:, 0]\n x1 = boxes[:, 1]\n y2 = boxes[:, 2]\n x2 = boxes[:, 3]\n area = (y2 - y1) * (x2 - x1)\n\n # Get indicies of boxes sorted by scores (highest first)\n ixs = scores.argsort()[::-1]\n\n pick = []\n while len(ixs) > 0:\n # Pick top box and add its index to the list\n i = ixs[0]\n pick.append(i)\n # Compute IoU of the picked box with the rest\n iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])\n # Identify boxes with IoU over the threshold. This\n # returns indices into ixs[1:], so add 1 to get\n # indices into ixs.\n remove_ixs = np.where(iou > threshold)[0] + 1\n # Remove indices of the picked and overlapped boxes.\n ixs = np.delete(ixs, remove_ixs)\n ixs = np.delete(ixs, 0)\n return np.array(pick, dtype=np.int32)\n\n\ndef apply_box_deltas(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)]. 
Note that (y2, x2) is outside the box.\n deltas: [N, (dy, dx, log(dh), log(dw))]\n \"\"\"\n boxes = boxes.astype(np.float32)\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= np.exp(deltas[:, 2])\n width *= np.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n return np.stack([y1, x1, y2, x2], axis=1)\n\n\ndef box_refinement_graph(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]\n \"\"\"\n box = tf.cast(box, tf.float32)\n gt_box = tf.cast(gt_box, tf.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = tf.log(gt_height / height)\n dw = tf.log(gt_width / width)\n\n result = tf.stack([dy, dx, dh, dw], axis=1)\n return result\n\n\ndef box_refinement(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is\n assumed to be outside the box.\n \"\"\"\n box = box.astype(np.float32)\n gt_box = gt_box.astype(np.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = np.log(gt_height / height)\n dw = np.log(gt_width / width)\n\n return np.stack([dy, dx, dh, dw], axis=1)\n\n\n############################################################\n# Dataset\n############################################################\n\nclass Dataset(object):\n \"\"\"The base class for dataset classes.\n To use it, create a new class that adds functions specific to the dataset\n you want to use. 
For example:\n\n class CatsAndDogsDataset(Dataset):\n def load_cats_and_dogs(self):\n ...\n def load_mask(self, image_id):\n ...\n def image_reference(self, image_id):\n ...\n\n See COCODataset and ShapesDataset as examples.\n \"\"\"\n\n def __init__(self, class_map=None):\n self._image_ids = []\n self.image_info = []\n # Background is always the first class\n self.class_info = [{\"source\": \"BG\", \"id\": 0, \"name\": \"BG\"}]\n self.source_class_ids = {}\n\n def add_class(self, source, class_id, class_name):\n assert \".\" not in source, \"Source name cannot contain a dot\"\n # Does the class exist already?\n for info in self.class_info:\n if info['source'] == source and info[\"id\"] == class_id:\n # source.class_id combination already available, skip\n return\n # Add the class\n self.class_info.append({\n \"source\": source,\n \"id\": class_id,\n \"name\": class_name,\n })\n\n def add_image(self, source, image_id, path, **kwargs):\n image_info = {\n \"id\": image_id,\n \"source\": source,\n \"path\": path,\n }\n image_info.update(kwargs)\n self.image_info.append(image_info)\n\n def image_reference(self, image_id):\n \"\"\"Return a link to the image in its source Website or details about\n the image that help looking it up or debugging it.\n\n Override for your dataset, but pass to this function\n if you encounter images not in your dataset.\n \"\"\"\n return \"\"\n\n def prepare(self, class_map=None):\n \"\"\"Prepares the Dataset class for use.\n\n TODO: class map is not supported yet. When done, it should handle mapping\n classes from different datasets to the same class ID.\n \"\"\"\n\n def clean_name(name):\n \"\"\"Returns a shorter version of object names for cleaner display.\"\"\"\n return \",\".join(name.split(\",\")[:1])\n\n # Build (or rebuild) everything else from the info dicts.\n self.num_classes = len(self.class_info)\n self.class_ids = np.arange(self.num_classes)\n self.class_names = [clean_name(c[\"name\"]) for c in self.class_info]\n self.num_images = len(self.image_info)\n self._image_ids = np.arange(self.num_images)\n\n # Mapping from source class and image IDs to internal IDs\n self.class_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.class_info, self.class_ids)}\n self.image_from_source_map = {\"{}.{}\".format(info['source'], info['id']): id\n for info, id in zip(self.image_info, self.image_ids)}\n\n # Map sources to class_ids they support\n self.sources = list(set([i['source'] for i in self.class_info]))\n self.source_class_ids = {}\n # Loop over datasets\n for source in self.sources:\n self.source_class_ids[source] = []\n # Find classes that belong to this dataset\n for i, info in enumerate(self.class_info):\n # Include BG class in all datasets\n if i == 0 or source == info['source']:\n self.source_class_ids[source].append(i)\n\n def map_source_class_id(self, source_class_id):\n \"\"\"Takes a source class ID and returns the int class ID assigned to it.\n\n For example:\n dataset.map_source_class_id(\"coco.12\") -> 23\n \"\"\"\n return self.class_from_source_map[source_class_id]\n\n def get_source_class_id(self, class_id, source):\n \"\"\"Map an internal class ID to the corresponding class ID in the source dataset.\"\"\"\n info = self.class_info[class_id]\n assert info['source'] == source\n return info['id']\n\n @property\n def image_ids(self):\n return self._image_ids\n\n def source_image_link(self, image_id):\n \"\"\"Returns the path or URL to the image.\n Override this to return a URL to the image if it's available 
online for easy\n debugging.\n \"\"\"\n return self.image_info[image_id][\"path\"]\n\n def load_image(self, image_id):\n \"\"\"Load the specified image and return a [H,W,3] Numpy array.\n \"\"\"\n # Load image\n image = skimage.io.imread(self.image_info[image_id]['path'])\n # If grayscale. Convert to RGB for consistency.\n if image.ndim != 3:\n image = skimage.color.gray2rgb(image)\n # If has an alpha channel, remove it for consistency\n if image.shape[-1] == 4:\n image = image[..., :3]\n return image\n\n def load_mask(self, image_id):\n \"\"\"Load instance masks for the given image.\n\n Different datasets use different ways to store masks. Override this\n method to load instance masks and return them in the form of am\n array of binary masks of shape [height, width, instances].\n\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n a binary mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # Override this function to load a mask from your dataset.\n # Otherwise, it returns an empty mask.\n logging.warning(\"You are using the default load_mask(), maybe you need to define your own one.\")\n mask = np.empty([0, 0, 0])\n class_ids = np.empty([0], np.int32)\n return mask, class_ids\n\n\ndef resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode=\"square\"):\n \"\"\"Resizes an image keeping the aspect ratio unchanged.\n\n min_dim: if provided, resizes the image such that it's smaller\n dimension == min_dim\n max_dim: if provided, ensures that the image longest side doesn't\n exceed this value.\n min_scale: if provided, ensure that the image is scaled up by at least\n this percent even if min_dim doesn't require it.\n mode: Resizing mode.\n none: No resizing. Return the image unchanged.\n square: Resize and pad with zeros to get a square image\n of size [max_dim, max_dim].\n pad64: Pads width and height with zeros to make them multiples of 64.\n If min_dim or min_scale are provided, it scales the image up\n before padding. max_dim is ignored in this mode.\n The multiple of 64 is needed to ensure smooth scaling of feature\n maps up and down the 6 levels of the FPN pyramid (2**6=64).\n crop: Picks random crops from the image. First, scales the image based\n on min_dim and min_scale, then picks a random crop of\n size min_dim x min_dim. Can be used in training only.\n max_dim is not used in this mode.\n\n Returns:\n image: the resized image\n window: (y1, x1, y2, x2). If max_dim is provided, padding might\n be inserted in the returned image. If so, this window is the\n coordinates of the image part of the full image (excluding\n the padding). 
The x2, y2 pixels are not included.\n scale: The scale factor used to resize the image\n padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0), (0, 0)]\n crop = None\n\n if mode == \"none\":\n return image, window, scale, padding, crop\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n if min_scale and scale < min_scale:\n scale = min_scale\n\n # Does it exceed max dim?\n if max_dim and mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = resize(image, (round(h * scale), round(w * scale)),\n preserve_range=True)\n\n # Need padding or cropping?\n if mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode='constant', constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = random.randint(0, (h - min_dim))\n x = random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y:y + min_dim, x:x + min_dim]\n window = (0, 0, min_dim, min_dim)\n else:\n raise Exception(\"Mode {} not supported\".format(mode))\n return image.astype(image_dtype), window, scale, padding, crop\n\n\ndef resize_mask(mask, scale, padding, crop=None):\n \"\"\"Resizes a mask using the given scale and padding.\n Typically, you get the scale and padding from resize_image() to\n ensure both, the image and the mask, are resized consistently.\n\n scale: mask scaling factor\n padding: Padding to add to the mask in the form\n [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Suppress warning from scipy 0.13.0, the output shape of zoom() is\n # calculated with round() instead of int()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)\n if crop is not None:\n y, x, h, w = crop\n mask = mask[y:y + h, x:x + w]\n else:\n mask = np.pad(mask, padding, mode='constant', constant_values=0)\n return mask\n\n\ndef minimize_mask(bbox, mask, mini_shape):\n \"\"\"Resize masks to a smaller version to reduce memory load.\n Mini-masks can be resized back to image scale using expand_masks()\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mini_mask = np.zeros(mini_shape + 
(mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n # Pick slice and cast to bool in case load_mask() returned wrong dtype\n m = mask[:, :, i].astype(bool)\n y1, x1, y2, x2 = bbox[i][:4]\n m = m[y1:y2, x1:x2]\n if m.size == 0:\n raise Exception(\"Invalid bounding box with area of zero\")\n # Resize with bilinear interpolation\n m = resize(m, mini_shape)\n mini_mask[:, :, i] = np.around(m).astype(np.bool)\n return mini_mask\n\n\ndef expand_mask(bbox, mini_mask, image_shape):\n \"\"\"Resizes mini masks back to image size. Reverses the change\n of minimize_mask().\n\n See inspect_data.ipynb notebook for more details.\n \"\"\"\n mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)\n for i in range(mask.shape[-1]):\n m = mini_mask[:, :, i]\n y1, x1, y2, x2 = bbox[i][:4]\n h = y2 - y1\n w = x2 - x1\n # Resize with bilinear interpolation\n m = resize(m, (h, w))\n mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)\n return mask\n\n\n# TODO: Build and use this function to reduce code duplication\ndef mold_mask(mask, config):\n pass\n\n\ndef unmold_mask(mask, bbox, image_shape):\n \"\"\"Converts a mask generated by the neural network to a format similar\n to its original shape.\n mask: [height, width] of type float. A small, typically 28x28 mask.\n bbox: [y1, x1, y2, x2]. The box to fit the mask in.\n\n Returns a binary mask with the same size as the original image.\n \"\"\"\n threshold = 0.5\n y1, x1, y2, x2 = bbox\n mask = resize(mask, (y2 - y1, x2 - x1))\n mask = np.where(mask >= threshold, 1, 0).astype(np.bool)\n\n # Put the mask in the right location.\n full_mask = np.zeros(image_shape[:2], dtype=np.bool)\n full_mask[y1:y2, x1:x2] = mask\n return full_mask\n\n\n############################################################\n# Anchors\n############################################################\n\ndef generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. 
For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))\n scales = scales.flatten()\n ratios = ratios.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales / np.sqrt(ratios)\n widths = scales * np.sqrt(ratios)\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride\n shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n\n # Reshape to get a list of (y, x) and a list of (h, w)\n box_centers = np.stack(\n [box_centers_y, box_centers_x], axis=2).reshape([-1, 2])\n box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])\n\n # Convert to corner coordinates (y1, x1, y2, x2)\n boxes = np.concatenate([box_centers - 0.5 * box_sizes,\n box_centers + 0.5 * box_sizes], axis=1)\n return boxes\n\n\ndef generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,\n anchor_stride):\n \"\"\"Generate anchors at different levels of a feature pyramid. Each scale\n is associated with a level of the pyramid, but each ratio is used in\n all levels of the pyramid.\n\n Returns:\n anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted\n with the same order of the given scales. So, anchors of scale[0] come\n first, then anchors of scale[1], and so on.\n \"\"\"\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n anchors = []\n for i in range(len(scales)):\n anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],\n feature_strides[i], anchor_stride))\n return np.concatenate(anchors, axis=0)\n\n\n############################################################\n# Miscellaneous\n############################################################\n\ndef trim_zeros(x):\n \"\"\"It's common to have tensors larger than the available data and\n pad with zeros. This function removes rows that are all zeros.\n\n x: [rows, columns].\n \"\"\"\n assert len(x.shape) == 2\n return x[~np.all(x == 0, axis=1)]\n\n\ndef compute_matches(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5, score_threshold=0.0):\n \"\"\"Finds matches between prediction and ground truth instances.\n\n Returns:\n gt_match: 1-D array. For each GT box it has the index of the matched\n predicted box.\n pred_match: 1-D array. 
For each predicted box, it has the index of\n the matched ground truth box.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Trim zero padding\n # TODO: cleaner to do zero unpadding upstream\n gt_boxes = trim_zeros(gt_boxes)\n gt_masks = gt_masks[..., :gt_boxes.shape[0]]\n pred_boxes = trim_zeros(pred_boxes)\n pred_scores = pred_scores[:pred_boxes.shape[0]]\n # Sort predictions by score from high to low\n indices = np.argsort(pred_scores)[::-1]\n pred_boxes = pred_boxes[indices]\n pred_class_ids = pred_class_ids[indices]\n pred_scores = pred_scores[indices]\n pred_masks = pred_masks[..., indices]\n\n # Compute IoU overlaps [pred_masks, gt_masks]\n overlaps = compute_overlaps_masks(pred_masks, gt_masks)\n\n # Loop through predictions and find matching ground truth boxes\n match_count = 0\n pred_match = -1 * np.ones([pred_boxes.shape[0]])\n gt_match = -1 * np.ones([gt_boxes.shape[0]])\n for i in range(len(pred_boxes)):\n # Find best matching ground truth box\n # 1. Sort matches by score\n sorted_ixs = np.argsort(overlaps[i])[::-1]\n # 2. Remove low scores\n low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]\n if low_score_idx.size > 0:\n sorted_ixs = sorted_ixs[:low_score_idx[0]]\n # 3. Find the match\n for j in sorted_ixs:\n # If ground truth box is already matched, go to next one\n if gt_match[j] > -1:\n continue\n # If we reach IoU smaller than the threshold, end the loop\n iou = overlaps[i, j]\n if iou < iou_threshold:\n break\n # Do we have a match?\n if pred_class_ids[i] == gt_class_ids[j]:\n match_count += 1\n gt_match[j] = i\n pred_match[i] = j\n break\n\n return gt_match, pred_match, overlaps\n\n\ndef compute_ap(gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold=0.5):\n \"\"\"Compute Average Precision at a set IoU threshold (default 0.5).\n\n Returns:\n mAP: Mean Average Precision\n precisions: List of precisions at different class score thresholds.\n recalls: List of recall values at different class score thresholds.\n overlaps: [pred_boxes, gt_boxes] IoU overlaps.\n \"\"\"\n # Get matches and overlaps\n gt_match, pred_match, overlaps = compute_matches(\n gt_boxes, gt_class_ids, gt_masks,\n pred_boxes, pred_class_ids, pred_scores, pred_masks,\n iou_threshold)\n\n # Compute precision and recall at each prediction box step\n precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)\n recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)\n\n # Pad with start and end values to simplify the math\n precisions = np.concatenate([[0], precisions, [0]])\n recalls = np.concatenate([[0], recalls, [1]])\n\n # Ensure precision values decrease but don't increase. This way, the\n # precision value at each recall threshold is the maximum it can be\n # for all following recall thresholds, as specified by the VOC paper.\n for i in range(len(precisions) - 2, -1, -1):\n precisions[i] = np.maximum(precisions[i], precisions[i + 1])\n\n # Compute mean AP over recall range\n indices = np.where(recalls[:-1] != recalls[1:])[0] + 1\n mAP = np.sum((recalls[indices] - recalls[indices - 1]) *\n precisions[indices])\n\n return mAP, precisions, recalls, overlaps\n\n\ndef compute_ap_range(gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_thresholds=None, verbose=1):\n \"\"\"Compute AP over a range or IoU thresholds. 
Default range is 0.5-0.95.\"\"\"\n # Default is 0.5 to 0.95 with increments of 0.05\n iou_thresholds = iou_thresholds or np.arange(0.5, 1.0, 0.05)\n \n # Compute AP over range of IoU thresholds\n AP = []\n for iou_threshold in iou_thresholds:\n ap, precisions, recalls, overlaps =\\\n compute_ap(gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold)\n if verbose:\n print(\"AP @{:.2f}:\\t {:.3f}\".format(iou_threshold, ap))\n AP.append(ap)\n AP = np.array(AP).mean()\n if verbose:\n print(\"AP @{:.2f}-{:.2f}:\\t {:.3f}\".format(\n iou_thresholds[0], iou_thresholds[-1], AP))\n return AP\n\n\ndef compute_recall(pred_boxes, gt_boxes, iou):\n \"\"\"Compute the recall at the given IoU threshold. It's an indication\n of how many GT boxes were found by the given prediction boxes.\n\n pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates\n \"\"\"\n # Measure overlaps\n overlaps = compute_overlaps(pred_boxes, gt_boxes)\n iou_max = np.max(overlaps, axis=1)\n iou_argmax = np.argmax(overlaps, axis=1)\n positive_ids = np.where(iou_max >= iou)[0]\n matched_gt_boxes = iou_argmax[positive_ids]\n\n recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]\n return recall, positive_ids\n\n\n# ## Batch Slicing\n# Some custom layers support a batch size of 1 only, and require a lot of work\n# to support batches greater than 1. This function slices an input tensor\n# across the batch dimension and feeds batches of size 1. Effectively,\n# an easy way to support batches > 1 quickly with little code modification.\n# In the long run, it's more efficient to modify the code to support large\n# batches and getting rid of this function. Consider this a temporary solution\ndef batch_slice(inputs, graph_fn, batch_size, names=None):\n \"\"\"Splits inputs into slices and feeds each slice to a copy of the given\n computation graph and then combines the results. It allows you to run a\n graph on a batch of inputs even if the graph is written to support one\n instance only.\n\n inputs: list of tensors. All must have the same first dimension length\n graph_fn: A function that returns a TF tensor that's part of a graph.\n batch_size: number of slices to divide the data into.\n names: If provided, assigns names to the resulting tensors.\n \"\"\"\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n outputs = []\n for i in range(batch_size):\n inputs_slice = [x[i] for x in inputs]\n output_slice = graph_fn(*inputs_slice)\n if not isinstance(output_slice, (tuple, list)):\n output_slice = [output_slice]\n outputs.append(output_slice)\n # Change outputs from a list of slices where each is\n # a list of outputs to a list of outputs and each has\n # a list of slices\n outputs = list(zip(*outputs))\n\n if names is None:\n names = [None] * len(outputs)\n\n result = [tf.stack(o, axis=0, name=n)\n for o, n in zip(outputs, names)]\n if len(result) == 1:\n result = result[0]\n\n return result\n\n\ndef download_trained_weights(coco_model_path, verbose=1):\n \"\"\"Download COCO trained weights from Releases.\n\n coco_model_path: local path of COCO trained weights\n \"\"\"\n if verbose > 0:\n print(\"Downloading pretrained model to \" + coco_model_path + \" ...\")\n with urllib.request.urlopen(COCO_MODEL_URL) as resp, open(coco_model_path, 'wb') as out:\n shutil.copyfileobj(resp, out)\n if verbose > 0:\n print(\"... 
done downloading pretrained model!\")\n\n\ndef norm_boxes(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [N, (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [N, (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.divide((boxes - shift), scale).astype(np.float32)\n\n\ndef denorm_boxes(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [N, (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [N, (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = shape\n scale = np.array([h - 1, w - 1, h - 1, w - 1])\n shift = np.array([0, 0, 1, 1])\n return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)\n\n\ndef resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,\n preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):\n \"\"\"A wrapper for Scikit-Image resize().\n\n Scikit-Image generates warnings on every call to resize() if it doesn't\n receive the right parameters. The right parameters depend on the version\n of skimage. This solves the problem by using different parameters per\n version. And it provides a central place to control resizing defaults.\n \"\"\"\n if LooseVersion(skimage.__version__) >= LooseVersion(\"0.14\"):\n # New in 0.14: anti_aliasing. Default it to False for backward\n # compatibility with skimage 0.13.\n return skimage.transform.resize(\n image, output_shape,\n order=order, mode=mode, cval=cval, clip=clip,\n preserve_range=preserve_range, anti_aliasing=anti_aliasing,\n anti_aliasing_sigma=anti_aliasing_sigma)\n else:\n return skimage.transform.resize(\n image, output_shape,\n order=order, mode=mode, cval=cval, clip=clip,\n preserve_range=preserve_range)\n"
] |
[
[
"numpy.dot",
"numpy.minimum",
"numpy.sqrt",
"tensorflow.stack",
"numpy.around",
"tensorflow.cast",
"numpy.cumsum",
"numpy.concatenate",
"numpy.max",
"numpy.all",
"numpy.any",
"numpy.exp",
"numpy.where",
"numpy.divide",
"numpy.pad",
"numpy.reshape",
"numpy.arange",
"scipy.ndimage.zoom",
"numpy.stack",
"numpy.argmax",
"numpy.zeros",
"numpy.log",
"numpy.multiply",
"numpy.delete",
"numpy.argsort",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.ones",
"tensorflow.log",
"numpy.empty"
]
] |
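The `mrcnn/utils.py` record above centres on bounding-box utilities whose convention is `[y1, x1, y2, x2]` with `(y2, x2)` lying outside the box. The self-contained NumPy sketch below mirrors the IoU formula from `compute_iou()` in that file (illustrative only; it does not import the module itself):

```python
# Self-contained NumPy sketch mirroring the IoU computation used by
# compute_iou() in the mrcnn/utils.py record above. Boxes follow the
# same convention: [y1, x1, y2, x2] with (y2, x2) outside the box.
import numpy as np

def iou_one_vs_many(box, boxes):
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    boxes_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    # Intersection rectangle, clipped at zero when boxes are disjoint.
    y1 = np.maximum(box[0], boxes[:, 0])
    y2 = np.minimum(box[2], boxes[:, 2])
    x1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[3], boxes[:, 3])
    intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
    return intersection / (box_area + boxes_area - intersection)

anchor = np.array([0, 0, 10, 10])
candidates = np.array([[0, 0, 10, 10],     # identical box   -> IoU 1.0
                       [5, 5, 15, 15],     # partial overlap -> IoU ~0.143
                       [20, 20, 30, 30]])  # disjoint        -> IoU 0.0
print(iou_one_vs_many(anchor, candidates))
```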
simonjheiler/function_approximation
|
[
"1b52177ba5d80a02227f3f8f45b5dc2b73bd16bb"
] |
[
"src/auxiliary.py"
] |
[
"\"\"\"Collection of auxiliary functions for simulation study\"\"\"\nimport pdb # noqa F401\n\nimport numpy as np\nimport pandas as pd\n\n# import numba as nb\n\n\n#########################################################################\n# FUNCTIONS\n#########################################################################\n\n\ndef get_grid(grid_params, dims):\n \"\"\"Generate grid representation of the state space.\n\n *grid_params* stores default values for bounds and grid density; the function\n constructs a grid with dimensionality *dims* from these defaults\n\n Parameters\n ----------\n grid_params: dict\n \"orders\": np.arr\n Number of gridpoints by dimension\n \"lower bounds\": np.array\n Lower bounds of domain by dimension\n \"upper bounds\": np.array\n Upper bounds of domain by dimension\n dims : int\n Dimensionality of the grid to be constructed\n\n Returns\n -------\n grid : dict\n Dictionary of dimensions (keys) and corresponding 1d-grids (values)\n\n \"\"\"\n orders = np.array(object=grid_params[\"orders\"][:dims], dtype=int)\n grid_min = np.array(object=grid_params[\"lower bounds\"][:dims], dtype=float)\n grid_max = np.array(object=grid_params[\"upper bounds\"][:dims], dtype=float)\n\n # calculate number of grid points and generate index\n n_points = orders.prod()\n index = np.array(object=range(n_points))\n\n # generate grid\n grid = {}\n for idx in range(dims):\n grid_values_tmp = np.linspace(grid_min[idx], grid_max[idx], orders[idx],)\n grid[idx] = grid_values_tmp\n\n return grid, index\n\n\ndef get_dims_state_grid(dims, n_gridpoints):\n\n tmp = np.zeros(dims, dtype=np.int)\n for idx in range(dims):\n tmp[idx] = n_gridpoints[idx]\n\n dims_state_grid = np.array(object=list(tmp))\n\n return dims_state_grid\n\n\ndef get_corner_points(grid):\n \"\"\"Generate array of corner points of the state space.\n\n Parameters\n ----------\n grid : dict\n Dictionary of dimensions (keys) and corresponding 1d-grids (values)\n\n Returns\n -------\n corner_points: np.array(2^d, d)\n Array of corner points of state space characterized by *grid*\n\n \"\"\"\n\n grid_min = np.array(object=[min(v) for _, v in grid.items()])\n grid_max = np.array(object=[max(v) for _, v in grid.items()])\n\n grids_bounds = []\n for idx in range(len(grid_min)):\n tmp = [grid_min[idx], grid_max[idx]]\n grids_bounds.append(tmp)\n\n corner_points = pd.DataFrame(\n index=pd.MultiIndex.from_product(grids_bounds, names=range(grid_min.size),)\n ).reset_index()\n\n corner_points = np.array(object=corner_points)\n\n return corner_points\n\n\ndef get_local_grid(point, grid):\n \"\"\"Generate array of points on *grid* defining smallest on-grid hypercube\n containing *point*\n\n Parameters\n ----------\n point: np.array(1, d)\n Values of point for which closest on-grid cube is required\n grid : dict\n Dictionary of dimensions (keys) and corresponding 1d-grids (values)\n\n Returns\n -------\n local_grid: np.array(2^d, d)\n Array of corner points of smallest hypercube on *grid* containing *point*\n\n \"\"\"\n\n dims = len(grid)\n\n grids_bounds = np.full((dims, 2), np.nan)\n for dim in range(dims):\n tmp = np.searchsorted(grid[dim], point[dim])\n grids_bounds[dim, :] = [grid[dim][tmp - 1], grid[dim][tmp]]\n\n local_grid = pd.DataFrame(\n index=pd.MultiIndex.from_product(grids_bounds, names=range(dims),)\n ).reset_index()\n\n local_grid = np.array(object=local_grid)\n\n return local_grid\n\n\ndef state_to_id(state, dims_state_grid):\n id = 0\n for i in range(len(dims_state_grid) - 1):\n step_size = 1\n for d in dims_state_grid[i 
+ 1 :]:\n step_size *= d\n id += state[i] * step_size\n id += state[-1]\n return id\n\n\ndef state_from_id(index, dims_state_grid):\n \"\"\"Convert *idx* to state array structured according to *dims_state_grid*\"\"\"\n\n entries = [index] * len(dims_state_grid)\n for i in range(1, len(dims_state_grid)):\n value = 1\n for j in range(i, len(dims_state_grid)):\n value *= dims_state_grid[j]\n for k in range(i - 1, len(dims_state_grid)):\n if k == i - 1:\n entries[k] //= value\n else:\n entries[k] %= value\n\n out = np.array(object=entries)\n\n return out\n\n\ndef inputs_from_state(state, grid):\n\n inputs_tmp = []\n\n for idx, val in enumerate(state):\n input = grid[idx][val]\n inputs_tmp.append(input)\n\n inputs = np.array(object=inputs_tmp)\n\n return inputs\n\n\ndef states_from_ids_batch(index, dims_state_grid):\n\n states_tmp = []\n\n for _idx, val in enumerate(index):\n state = state_from_id(val, dims_state_grid)\n states_tmp.append(state)\n\n states = np.array(object=states_tmp)\n\n return states\n\n\ndef inputs_from_ids_batch(index, dims_state_grid, grid):\n\n inputs_tmp = []\n\n for _idx, val in enumerate(index):\n state = state_from_id(val, dims_state_grid)\n input = inputs_from_state(state, grid)\n inputs_tmp.append(input)\n\n inputs = np.array(object=inputs_tmp)\n\n return inputs\n\n\ndef states_to_ids_batch(states, dims_state_grid):\n \"\"\"Translate points to index values.\n\n Parameters\n ----------\n\n\n Returns\n -------\n\n \"\"\"\n n_states, _ = states.shape\n ids = []\n\n for idx in range(n_states):\n id_tmp = state_to_id(states[idx, :], dims_state_grid)\n ids.append(id_tmp)\n\n return ids\n\n\ndef get_data(func, grid_size, n_vars, dims_state_grid, grid):\n\n # filenames\n file_states = func.__name__ + \"_\" + grid_size + \"_\" + str(n_vars) + \"_points.npy\"\n file_results = func.__name__ + \"_\" + grid_size + \"_\" + str(n_vars) + \"_results.npy\"\n\n try:\n points = np.load(\"./results/sandbox/\" + file_states)\n results = np.load(\"./results/sandbox/\" + file_results)\n except FileNotFoundError:\n n_states = dims_state_grid.prod()\n index = np.array(object=range(n_states), dtype=int)\n points = inputs_from_ids_batch(index, dims_state_grid, grid)\n results = func(points)\n # np.save(\"./results/sandbox/\" + file_states, points)\n # np.save(\"./results/sandbox/\" + file_results, results)\n\n return points, results\n\n\ndef mse(x1, x2, axis=0):\n \"\"\"Calculate mean squared error.\n\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast. This uses\n :func:`numpy.asanyarray` to convert the input. Whether this is the desired result or\n not depends on the array subclass, for example NumPy matrices will silently\n produce an incorrect result.\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two arrays.\n axis : int\n Axis along which the summary statistic is calculated\n\n Returns\n -------\n mse : numpy.ndarray or float\n Mean squared error along given axis.\n\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((x1 - x2) ** 2, axis=axis)\n\n\ndef msre(x1, x2, axis=0):\n \"\"\"Calculate mean squared relative error.\n\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast. This uses\n :func:`numpy.asanyarray` to convert the input. 
Whether this is the desired result or\n not depends on the array subclass, for example NumPy matrices will silently\n produce an incorrect result.\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two arrays.\n axis : int\n Axis along which the summary statistic is calculated\n\n Returns\n -------\n mse : numpy.ndarray or float\n Mean squared relative error along given axis.\n\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.mean((((x1 - x2) ** 2) / x1), axis=axis)\n\n\ndef rmse(x1, x2, axis=0):\n \"\"\"Calculate root mean squared error.\n\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast. This uses\n :func:`numpy.asanyarray` to convert the input. Whether this is the desired result or\n not depends on the array subclass, for example NumPy matrices will silently\n produce an incorrect result.\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two arrays.\n axis : int\n Axis along which the summary statistic is calculated.\n\n Returns\n -------\n rmse : numpy.ndarray or float\n Root mean squared error along given axis.\n\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.sqrt(mse(x1, x2, axis=axis))\n\n\ndef rmsre(x1, x2, axis=0):\n \"\"\"Calculate root mean squared error.\n\n If ``x1`` and ``x2`` have different shapes, then they need to broadcast. This uses\n :func:`numpy.asanyarray` to convert the input. Whether this is the desired result or\n not depends on the array subclass, for example NumPy matrices will silently\n produce an incorrect result.\n\n Parameters\n ----------\n x1, x2 : array_like\n The performance measure depends on the difference between these two arrays.\n axis : int\n Axis along which the summary statistic is calculated.\n\n Returns\n -------\n rmse : numpy.ndarray or float\n Root mean squared error along given axis.\n\n \"\"\"\n x1 = np.asanyarray(x1)\n x2 = np.asanyarray(x2)\n return np.sqrt(msre(x1, x2, axis=axis))\n\n\ndef get_interpolation_points(n_interpolation_points, grid, seed):\n \"\"\"Get a set of random points on the domain of *grid*.\n\n Parameters\n ----------\n n_interpolation_points : int\n Number of random points to generate.\n grid: dict\n Specification of state space from which to draw the points.\n seed : int\n Seed for random number generation.\n Returns\n -------\n interpolation_points : np.ndarray(n, d)\n Random points from the state space.\n \"\"\"\n np.random.seed(seed)\n\n grid_min = np.array(object=[min(v) for _, v in grid.items()])\n grid_max = np.array(object=[max(v) for _, v in grid.items()])\n\n points = []\n\n for _ in range(n_interpolation_points):\n tmp = np.random.uniform(0.0, 1.0, len(grid_min))\n points.append(tmp)\n\n interpolation_points = np.array(\n object=(\n points * grid_min\n + (np.ones((n_interpolation_points, len(grid_min))) - points) * grid_max\n ),\n dtype=float,\n )\n\n return interpolation_points\n"
] |
[
[
"numpy.random.seed",
"numpy.linspace",
"numpy.full",
"numpy.asanyarray",
"numpy.mean",
"numpy.searchsorted",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
yashbonde/chess-full-stack
|
[
"5ee67fc4583d56255bf902a7ad04c1a75822f5bc",
"5ee67fc4583d56255bf902a7ad04c1a75822f5bc"
] |
[
"chess_engine/zima_common/engine.py",
"zima_test.py"
] |
[
"\"\"\"\nthis is the main file that is involved with\n\"\"\"\n\nimport re\nimport chess\nimport numpy as np\n\nBOARD_POS = ['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1', 'A2', 'B2', 'C2', 'D2', 'E2',\n 'F2', 'G2', 'H2', 'A3', 'B3', 'C3', 'D3', 'E3', 'F3', 'G3', 'H3', 'A4', 'B4', 'C4', 'D4',\n 'E4', 'F4', 'G4', 'H4', 'A5', 'B5', 'C5', 'D5', 'E5', 'F5', 'G5', 'H5', 'A6', 'B6', 'C6',\n 'D6', 'E6', 'F6', 'G6', 'H6', 'A7', 'B7', 'C7', 'D7', 'E7', 'F7', 'G7', 'H7', 'A8', 'B8',\n 'C8', 'D8', 'E8', 'F8', 'G8', 'H8']\nFLIP_DICT = {\"P\": \"P'\", \"N\": \"N'\", \"B\": \"B'\", \"R\": \"R'\", \"Q\": \"Q'\", \"K\": \"K'\"}\nFLIP_DICT2 = {\"P'\": 'p', \"N'\": 'n', \"B'\": 'b', \"R'\": 'r', \"Q'\": 'q', \"K'\": 'k'}\nRESULT_VALUE = {'0-1': -1, '1/2-1/2':0, '1-0':+1}\n\ndef flip_board_move(board, move):\n if not board.split()[1] == 'b':\n return board, move\n fen, _, castling_rights, enp, half_move, full_move = board.split()\n fen = fen[::-1].swapcase()\n if enp.upper() in BOARD_POS:\n enp = BOARD_POS[abs(63 - chess.__dict__[enp.upper()])].lower()\n castling_rights = board.split()[2]\n board = ' '.join([fen, 'w', castling_rights, enp, half_move, full_move])\n\n # now handle move\n move['from'] = abs(move['from'] - 63)\n move['to'] = abs(move['to'] - 63)\n board_ = chess.Board(board)\n del board_\n return board, move\n\n\ndef make_state(fen_string, return_legal=False, player_layer = False):\n board = chess.Board(fen_string)\n state = np.zeros(64, np.uint8)\n for i in range(64):\n pp = board.piece_at(i)\n if pp is not None:\n state[i] = {\"P\": 1, \"N\": 2, \"B\": 3, \"R\": 4, \"Q\": 5, \"K\": 6,\n \"p\": 9, \"n\": 10, \"b\": 11, \"r\": 12, \"q\": 13, \"k\": 14}[pp.symbol()]\n castling_rights = fen_string.split()[2]\n if board.has_queenside_castling_rights(chess.WHITE):\n assert state[0] == 4\n state[0] = 7\n if board.has_kingside_castling_rights(chess.WHITE):\n assert state[7] == 4\n state[7] = 7\n if board.has_queenside_castling_rights(chess.BLACK):\n assert state[56] == 12\n state[56] = 8 + 7\n if board.has_kingside_castling_rights(chess.BLACK):\n assert state[63] == 12\n state[63] = 8 + 7\n\n if board.ep_square is not None:\n assert state[board.ep_square] == 0\n state[board.ep_square] = 8\n state = state.reshape(8, 8) # reshape the state to target\n\n # binary state\n state_size = 5 if player_layer else 4\n b_state = np.zeros([state_size, 8, 8], np.uint8)\n b_state[0] = (state >> 3) & 1\n b_state[1] = (state >> 2) & 1\n b_state[2] = (state >> 1) & 1\n b_state[3] = (state >> 0) & 1\n if player_layer:\n b_state[4] = (board.turn * 1.)\n\n if return_legal:\n return np.transpose(b_state, [1, 2, 0]).astype(np.uint8), list(board.legal_moves)\n return np.transpose(b_state, [1, 2, 0]).astype(np.uint8)\n\n\ndef match_best_legal_move(from_sq_probs_sorted, to_square_probs_sorted, legal_moves):\n for fsq, tsq in zip(from_sq_probs_sorted, to_square_probs_sorted):\n legal_common = list(filter(\n lambda move: (move.from_square, move.to_square) == (\n fsq, tsq), legal_moves\n ))\n if not legal_common:\n continue\n move = legal_common[0]\n break\n return move\n\n\ndef preprocess_states(bboards, bmoves, bresults):\n bstates = []\n bfrom = []\n bto = []\n bvalues = []\n for board, move_obj, res in zip(bboards, bmoves, bresults):\n if board.split()[1] == 'b':\n # this is the blacks move and we need to flip the board and fix the moves\n board, move_obj = flip_board_move(board, move_obj)\n state, _ = make_state(board)\n bstates.append(state)\n bfrom.append(move_obj['from'])\n bto.append(move_obj['to'])\n 
bvalues.append(res)\n\n bstates = np.array(bstates).astype(np.float32)\n bfrom = np.array(bfrom).astype(np.int32)\n bto = np.array(bto).astype(np.int32)\n bvalues = np.array(bvalues)\n\n return bstates, bfrom, bto, bvalues\n",
"\"\"\"this is the test for zima-net\"\"\"\nimport chess\n\ndef test_build_network_policy():\n import tensorflow as tf\n from net import network as zima_net\n print(zima_net((\n tf.placeholder(tf.uint8, [None, 8, 8, 4]),\n tf.placeholder(tf.int32, [None, ]),\n tf.placeholder(tf.int32, [None, ]),\n tf.placeholder(tf.float32, [None, ])\n )))\n\ndef test_build_network_value_alphazero():\n from types import SimpleNamespace\n from chess_engine.zima_value import value_net\n import tensorflow as tf\n config = SimpleNamespace(\n lr = 0.2,\n lr_drops = [100,300,500],\n reg_const = 0.002,\n training = True\n )\n iter_obj = SimpleNamespace(\n board=tf.placeholder(tf.uint8, [None, 8, 8, 25]),\n value_target=tf.placeholder(tf.int32, [None, ])\n )\n network = value_net.value_network_alphazero(iter_obj, config)\n print(network)\n\n\ndef test_preprocess_states():\n from engine import make_state, preprocess_states\n boards = [\n '2r2rk1/4bp1p/bqn1p1pP/p3P3/1ppP1BN1/2p2NP1/P1P2P2/R1Q1R1K1 w - - 0 23',\n '2r2rk1/4bp1p/bqn1p1pP/p3P1B1/1ppP2N1/2p2NP1/P1P2P2/R1Q1R1K1 b - - 1 23',\n '2r2rk1/4bp1p/bq2p1pP/p3P1B1/1ppn2N1/2p2NP1/P1P2P2/R1Q1R1K1 w - - 0 24',\n '2r2rk1/4bp1p/bq2p1pP/p3P1B1/1ppN2N1/2p3P1/P1P2P2/R1Q1R1K1 b - - 0 24',\n '2r2rk1/4bp1p/b3p1pP/p3P1B1/1ppq2N1/2p3P1/P1P2P2/R1Q1R1K1 w - - 0 25'\n ]\n moves = [\n {'from': 29, 'to': 38, 'san': 'Bg5'},\n {'from': 42, 'to': 27, 'san': 'Nxd4'},\n {'from': 21, 'to': 27, 'san': 'Nxd4'},\n {'from': 41, 'to': 27, 'san': 'Qxd4'},\n {'from': 38, 'to': 52, 'san': 'Bxe7'}\n ]\n results = [\n -1.0,\n -1.0,\n -1.0,\n -1.0,\n -1.0\n ]\n\n bstates, bfrom, bto, bvalues = preprocess_states(boards, moves, results)\n print('-------- STATES ----------\\n{}'.format(bstates.shape))\n print('-------- MOVES (FROM) --------\\n{}'.format(bfrom))\n print('-------- MOVES (TO) --------\\n{}'.format(bto))\n print('-------- VALUES ---------\\n{}'.format(bvalues))\n\n\ndef test_trainer():\n from trainer import train_network_supervised\n from glob import glob\n train_network_supervised(glob('games_data/*.csv'),\n 'pokemon', num_epochs=60)\n\n\ndef test_data_generator():\n import tensorflow as tf\n from data_loader import get_batch\n from glob import glob\n batches, num_batches, tot_samples = get_batch(\n filenames=glob('games_data/*.csv'), batch_size=3)\n print(f'>>>>>>>>>>>>>>> batches: {batches}, num_batches: {num_batches}')\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(\n batches.output_types, batches.output_shapes)\n xs = iter.get_next()\n train_init_op = iter.make_initializer(batches)\n print(f'=============== xs: {xs}')\n\n with tf.Session() as sess:\n sess.run(train_init_op)\n ops = (xs[0], xs[1], xs[2], xs[3])\n _board, _from, _to, _res = sess.run(ops)\n print(f'shape of board: {_board.shape}')\n print(f'from action: {_from}')\n print(f'to action: {_to}')\n print(f'result: {_res}')\n\n\ndef alphazero_network_operation():\n from chess_engine.zima_value.player import ValuePlayerNetworkWrapper\n in_state = [\n '6k1/6p1/8/pp6/4p3/1P1rB1PP/Pbb1NP2/4RK2 b - - 6 41',\n '6k1/6p1/8/pp6/4p3/bP1rB1PP/P1b1NP2/R4K2 b - - 4 40',\n 'r4rk1/1p2b1p1/p3b3/q3pp1Q/N2BB3/1P4P1/P4P1P/2R2RK1 b - - 0 24',\n 'r1b1k2r/1p2bppp/p1n1p3/q7/N3BB2/1P4P1/P1Q2P1P/R4RK1 b kq - 2 17',\n 'r1bqkb1r/pp1ppppp/2n2n2/2p5/2P5/2N2N2/PP1PPPPP/R1BQKB1R w KQkq - 2 4'\n ]\n player = ValuePlayerNetworkWrapper()\n new_state, boards, values = player.run_one_step_greedy(in_state)\n print('-----> new_state:', new_state)\n print('-----> boards', boards)\n print('-----> values:', 
values)\n\n\nif __name__ == \"__main__\":\n alphazero_network_operation()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.transpose"
],
[
"tensorflow.placeholder",
"tensorflow.data.Iterator.from_structure",
"tensorflow.Session"
]
] |
ShirelJosef/deep-reinforcement-learning
|
[
"63979b975c71e730c9d4c66e39efac210260dd18"
] |
[
"monte-carlo/part_1.py"
] |
[
"import sys\nimport gym\nimport numpy as np\nfrom collections import defaultdict\n\nfrom plot_utils import plot_blackjack_values, plot_policy\n\nenv = gym.make('Blackjack-v0')\n\ndef generate_episode_from_limit_stochastic(bj_env):\n episode = []\n state = bj_env.reset()\n while True:\n probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]\n action = np.random.choice(np.arange(2), p=probs)\n next_state, reward, done, info = bj_env.step(action)\n episode.append((state, action, reward))\n state = next_state\n if done:\n break\n return episode\n\nfor i in range(3):\n print(generate_episode_from_limit_stochastic(env))\n\n\ndef mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0):\n # initialize empty dictionaries of arrays\n returns_sum = defaultdict(lambda: np.zeros(env.action_space.n))\n N = defaultdict(lambda: np.zeros(env.action_space.n))\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n # loop over episodes\n for i_episode in range(1, num_episodes + 1):\n # monitor progress\n if i_episode % 1000 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n\n ## TODO: complete the function\n episode = generate_episode(env)\n visit = defaultdict(lambda: np.zeros(env.action_space.n))\n for i, (state, action, reward) in enumerate(episode):\n if visit[state][action] == 0:\n visit[state][action] = 1\n N[state][action] = N[state][action] + 1\n episode_reward = 0\n for j in range(len(episode) - i):\n episode_reward = episode_reward + (gamma ** j) * episode[i + j][2]\n returns_sum[state][action] = returns_sum[state][action] + episode_reward\n Q[state][action] = returns_sum[state][action] / N[state][action]\n return Q\n\n# obtain the action-value function\nQ = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)\n\n# obtain the corresponding state-value function\nV_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \\\n for k, v in Q.items())\n\n# plot the state-value function\nplot_blackjack_values(V_to_plot)"
] |
[
[
"numpy.arange",
"numpy.dot",
"numpy.zeros"
]
] |
ahnitz/arviz
|
[
"c4005b178c600f949d90dbe0258b9c8a1200626d"
] |
[
"arviz/plots/backends/matplotlib/elpdplot.py"
] |
[
"\"\"\"Matplotlib ELPDPlot.\"\"\"\nimport warnings\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.lines import Line2D\n\nfrom ....rcparams import rcParams\nfrom ...plot_utils import _scale_fig_size, color_from_dim, set_xticklabels\nfrom . import backend_kwarg_defaults, backend_show, create_axes_grid, matplotlib_kwarg_dealiaser\n\n\ndef plot_elpd(\n ax,\n models,\n pointwise_data,\n numvars,\n figsize,\n textsize,\n plot_kwargs,\n xlabels,\n coord_labels,\n xdata,\n threshold,\n legend,\n color,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib elpd plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n backend_kwargs.setdefault(\"constrained_layout\", not xlabels)\n\n plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, \"scatter\")\n\n markersize = None\n\n if isinstance(color, str):\n if color in pointwise_data[0].dims:\n colors, color_mapping = color_from_dim(pointwise_data[0], color)\n cmap_name = plot_kwargs.pop(\"cmap\", plt.rcParams[\"image.cmap\"])\n markersize = plot_kwargs.pop(\"s\", plt.rcParams[\"lines.markersize\"])\n cmap = getattr(cm, cmap_name)\n handles = [\n Line2D(\n [], [], color=cmap(float_color), label=coord, ms=markersize, lw=0, **plot_kwargs\n )\n for coord, float_color in color_mapping.items()\n ]\n plot_kwargs.setdefault(\"cmap\", cmap_name)\n plot_kwargs.setdefault(\"s\", markersize ** 2)\n plot_kwargs.setdefault(\"c\", colors)\n else:\n legend = False\n else:\n legend = False\n plot_kwargs.setdefault(\"c\", color)\n\n # flatten data (data must be flattened after selecting, labeling and coloring)\n pointwise_data = [pointwise.values.flatten() for pointwise in pointwise_data]\n\n if numvars == 2:\n (figsize, ax_labelsize, titlesize, xt_labelsize, _, markersize) = _scale_fig_size(\n figsize, textsize, numvars - 1, numvars - 1\n )\n plot_kwargs.setdefault(\"s\", markersize ** 2)\n backend_kwargs.setdefault(\"figsize\", figsize)\n backend_kwargs[\"squeeze\"] = True\n if ax is None:\n fig, ax = create_axes_grid(\n 1,\n backend_kwargs=backend_kwargs,\n )\n else:\n fig = ax.get_figure()\n\n ydata = pointwise_data[0] - pointwise_data[1]\n ax.scatter(xdata, ydata, **plot_kwargs)\n if threshold is not None:\n diff_abs = np.abs(ydata - ydata.mean())\n bool_ary = diff_abs > threshold * ydata.std()\n if coord_labels is None:\n coord_labels = xdata.astype(str)\n outliers = np.argwhere(bool_ary).squeeze()\n for outlier in outliers:\n label = coord_labels[outlier]\n ax.text(\n outlier,\n ydata[outlier],\n label,\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\" if ydata[outlier] > 0 else \"top\",\n fontsize=0.8 * xt_labelsize,\n )\n\n ax.set_title(\"{} - {}\".format(*models), fontsize=titlesize, wrap=True)\n ax.set_ylabel(\"ELPD difference\", fontsize=ax_labelsize, wrap=True)\n ax.tick_params(labelsize=xt_labelsize)\n if xlabels:\n set_xticklabels(ax, coord_labels)\n fig.autofmt_xdate()\n fig.tight_layout()\n if legend:\n ncols = len(handles) // 6 + 1\n ax.legend(handles=handles, ncol=ncols, title=color)\n\n else:\n max_plots = (\n numvars ** 2 if rcParams[\"plot.max_subplots\"] is None else rcParams[\"plot.max_subplots\"]\n )\n vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)\n if vars_to_plot < numvars:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of resulting ELPD pairwise plots with these variables, generating only a \"\n \"{side}x{side} 
grid\".format(max_plots=max_plots, side=vars_to_plot),\n UserWarning,\n )\n numvars = vars_to_plot\n\n (figsize, ax_labelsize, titlesize, xt_labelsize, _, markersize) = _scale_fig_size(\n figsize, textsize, numvars - 2, numvars - 2\n )\n plot_kwargs.setdefault(\"s\", markersize ** 2)\n\n if ax is None:\n fig, ax = plt.subplots(\n numvars - 1,\n numvars - 1,\n figsize=figsize,\n squeeze=False,\n constrained_layout=not xlabels,\n sharey=\"row\",\n sharex=\"all\",\n )\n else:\n fig = ax.get_figure()\n\n for i in range(0, numvars - 1):\n var1 = pointwise_data[i]\n\n for j in range(0, numvars - 1):\n if j < i:\n ax[j, i].axis(\"off\")\n continue\n\n var2 = pointwise_data[j + 1]\n ax[j, i].scatter(xdata, var1 - var2, **plot_kwargs)\n if threshold is not None:\n ydata = var1 - var2\n diff_abs = np.abs(ydata - ydata.mean())\n bool_ary = diff_abs > threshold * ydata.std()\n if coord_labels is None:\n coord_labels = xdata.astype(str)\n outliers = np.argwhere(bool_ary).squeeze()\n for outlier in outliers:\n label = coord_labels[outlier]\n ax[j, i].text(\n outlier,\n ydata[outlier],\n label,\n horizontalalignment=\"center\",\n verticalalignment=\"bottom\" if ydata[outlier] > 0 else \"top\",\n fontsize=0.8 * xt_labelsize,\n )\n\n if i == 0:\n ax[j, i].set_ylabel(\"ELPD difference\", fontsize=ax_labelsize, wrap=True)\n\n ax[j, i].tick_params(labelsize=xt_labelsize)\n ax[j, i].set_title(\n \"{} - {}\".format(models[i], models[j + 1]), fontsize=titlesize, wrap=True\n )\n if xlabels:\n set_xticklabels(ax[-1, -1], coord_labels)\n fig.autofmt_xdate()\n fig.tight_layout()\n if legend:\n ncols = len(handles) // 6 + 1\n ax[0, 1].legend(\n handles=handles, ncol=ncols, title=color, bbox_to_anchor=(0, 1), loc=\"upper left\"\n )\n\n if backend_show(show):\n plt.show()\n\n return ax\n"
] |
[
[
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.argwhere"
]
] |
xzlmoon/QUANTAXIS
|
[
"e250e88a66becb244525ee29ecdf833e7a14d1d9"
] |
[
"QUANTAXIS/QAUtil/QATransform.py"
] |
[
"# coding:utf-8\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2016-2019 yutiansut/QUANTAXIS\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport csv\nimport json\n\nimport numpy as np\nimport pandas as pd\n\n\ndef QA_util_to_json_from_pandas(data):\n \"\"\"需要对于datetime 和date 进行转换, 以免直接被变成了时间戳\"\"\"\n if 'datetime' in data.columns:\n data.datetime = data.datetime.apply(str)\n if 'date' in data.columns:\n data.date = data.date.apply(str)\n return json.loads(data.to_json(orient='records'))\n\n\ndef QA_util_to_json_from_numpy(data):\n pass\n\n\ndef QA_util_to_json_from_list(data):\n pass\n\n\ndef QA_util_to_list_from_pandas(data):\n return np.asarray(data).tolist()\n\n\ndef QA_util_to_list_from_numpy(data):\n return data.tolist()\n\n\ndef QA_util_to_pandas_from_json(data):\n\n if isinstance(data, dict):\n return pd.DataFrame(data=[data, ])\n else:\n return pd.DataFrame(data=[{'value': data}])\n\n\ndef QA_util_to_pandas_from_list(data):\n if isinstance(data, list):\n return pd.DataFrame(data=data)\n"
] |
[
[
"numpy.asarray",
"pandas.DataFrame"
]
] |
beyondxz/CTGCN
|
[
"6afaca390dd91371521e9d6cb99087f8c34c43e9"
] |
[
"CTGCN/embedding.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nimport os, time, json, sys\nimport gc\nsys.path.append(\"..\")\nimport torch\nimport torch.nn as nn\nfrom CTGCN.utils import check_and_make_path, get_normalize_adj, get_sp_adj_mat, sparse_mx_to_torch_sparse_tensor\n\nclass DataLoader:\n full_node_list: list\n node_num: int\n max_time_num: int\n def __init__(self, node_list, max_time_num):\n self.max_time_num = max_time_num\n self.full_node_list = node_list\n self.node_num = len(self.full_node_list)\n return\n\n def get_date_adj_list(self, origin_base_path, start_idx, duration, data_type='tensor'):\n date_dir_list = sorted(os.listdir(origin_base_path))\n date_adj_list = []\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n original_graph_path = os.path.join(origin_base_path, date_dir_list[i])\n spmat = get_sp_adj_mat(original_graph_path, self.full_node_list, sep='\\t')\n # spmat = sp.coo_matrix((np.exp(alpha * spmat.data), (spmat.row, spmat.col)), shape=(self.node_num, self.node_num))\n spmat = get_normalize_adj(spmat)\n if data_type == 'tensor':\n sptensor = sparse_mx_to_torch_sparse_tensor(spmat)\n date_adj_list.append(sptensor.cuda() if torch.cuda.is_available() else sptensor)\n elif data_type == 'matrix':\n spmat = get_normalize_adj(spmat)\n date_adj_list.append(spmat)\n # print(len(date_adj_list))\n return date_adj_list\n\n def get_core_adj_list(self, core_base_path, start_idx, duration):\n date_dir_list = sorted(os.listdir(core_base_path))\n time_stamp_num = len(date_dir_list)\n assert start_idx < time_stamp_num\n core_adj_list = []\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n date_dir_path = os.path.join(core_base_path, date_dir_list[i])\n f_list = sorted(os.listdir(date_dir_path))\n tmp_adj_list = []\n for i, f_name in enumerate(f_list):\n spmat = sp.load_npz(os.path.join(date_dir_path, f_name))\n spmat = get_normalize_adj(spmat)\n sptensor = sparse_mx_to_torch_sparse_tensor(spmat)\n tmp_adj_list.append(sptensor.cuda() if torch.cuda.is_available() else sptensor)\n core_adj_list.append(tmp_adj_list)\n # print('core_adj_list len: ', len(core_adj_list))\n return core_adj_list\n\n def get_node_pair_list(self, walk_pair_base_path, start_idx, duration):\n walk_file_list = sorted(os.listdir(walk_pair_base_path))\n node_pair_list = []\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n walk_file_path = os.path.join(walk_pair_base_path, walk_file_list[i])\n walk_spadj = sp.load_npz(walk_file_path)\n neighbor_list = walk_spadj.tolil().rows\n node_pair_list.append(neighbor_list)\n return node_pair_list\n\n def get_neg_freq_list(self, node_freq_base_path, start_idx, duration):\n freq_file_list = sorted(os.listdir(node_freq_base_path))\n node_freq_list = []\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n freq_file_path = os.path.join(node_freq_base_path, freq_file_list[i])\n with open(freq_file_path, 'r') as fp:\n node_freq_list.append(json.load(fp))\n return node_freq_list\n\n def get_degree_feature_list(self, origin_base_path, start_idx, duration, init='gaussian'):\n x_list = []\n max_degree = 0\n adj_list = []\n degree_list = []\n ret_degree_list = []\n date_dir_list = sorted(os.listdir(origin_base_path))\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n original_graph_path = os.path.join(origin_base_path, date_dir_list[i])\n adj = get_sp_adj_mat(original_graph_path, self.full_node_list, sep='\\t')\n adj_list.append(adj)\n degrees = 
adj.sum(axis=1).astype(np.int)\n max_degree = max(max_degree, degrees.max())\n degree_list.append(degrees)\n ret_degree_list.append(torch.FloatTensor(degrees).cuda() if torch.cuda.is_available() else degrees)\n for i, degrees in enumerate(degree_list):\n # other structural feature initialization techiniques can also be tried to improve performance\n if init == 'gaussian':\n fea_list = []\n for degree in degrees:\n fea_list.append(np.random.normal(degree, 0.0001, max_degree + 1))\n fea_arr = np.array(fea_list)\n fea_tensor = torch.FloatTensor(fea_arr)\n x_list.append(fea_tensor.cuda() if torch.cuda.is_available() else fea_tensor)\n return x_list, fea_arr.shape[1], ret_degree_list\n elif init == 'combine':\n fea_list = []\n for degree in degrees:\n fea_list.append(np.random.normal(degree, 0.0001, max_degree + 1))\n fea_arr = np.array(fea_list)\n ###################\n # here combine the adjacent matrix feature could improve strcutral role classification performance,\n # but if the graph is large, adj feature will be memory consuming\n fea_arr = np.hstack((fea_arr, adj_list[i].toarray()))\n ###################\n fea_tensor = torch.FloatTensor(fea_arr)\n x_list.append(fea_tensor.cuda() if torch.cuda.is_available() else fea_tensor)\n return x_list, fea_arr.shape[1], ret_degree_list\n elif init == 'one-hot': # one-hot degree feature\n data = np.ones(degrees.shape[0], dtype=np.int)\n row = np.arange(degrees.shape[0])\n col = degrees.flatten().A[0]\n spmat = sp.csr_matrix((data, (row, col)), shape=(degrees.shape[0], max_degree + 1))\n sptensor = sparse_mx_to_torch_sparse_tensor(spmat)\n x_list.append(sptensor.cuda() if torch.cuda.is_available() else sptensor)\n print('max degree: ', max_degree + 1)\n return x_list, max_degree + 1, ret_degree_list\n else:\n raise AttributeError('Unsupported feature initialization type!')\n\n def get_feature_list(self, feature_base_path, start_idx, duration):\n if feature_base_path is None:\n x_list = []\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n sptensor = sparse_mx_to_torch_sparse_tensor(sp.eye(self.node_num))\n x_list.append(sptensor.cuda() if torch.cuda.is_available() else sptensor)\n print('len x_list: ', len(x_list))\n return x_list\n else:\n feature_file_list = sorted(os.listdir(feature_base_path))\n x_list = []\n for i in range(start_idx, min(start_idx + duration, self.max_time_num)):\n feature_file_path = os.path.join(feature_base_path, feature_file_list[i])\n df_feature = pd.read_csv(feature_file_path, sep='\\t', header=0)\n feature_tensor = torch.FloatTensor(df_feature.values)\n x_list.append(feature_tensor.cuda() if torch.cuda.is_available() else feature_tensor)\n print('len x_list: ', len(x_list))\n return x_list\n\nclass BaseEmbedding:\n base_path: str\n origin_base_path: str\n embedding_base_path: str\n model_base_path: str\n\n full_node_list: list\n node_num: int\n max_time_num: int\n timestamp_list: list\n device: torch.device\n\n def __init__(self, base_path, origin_folder, embedding_folder, node_list, model, loss, max_time_num, model_folder=\"model\"):\n # file paths\n self.base_path = base_path\n self.origin_base_path = os.path.abspath(os.path.join(base_path, origin_folder))\n self.embedding_base_path = os.path.abspath(os.path.join(base_path, embedding_folder))\n self.model_base_path = os.path.abspath(os.path.join(base_path, model_folder))\n\n self.full_node_list = node_list\n self.node_num = len(self.full_node_list) # node num\n self.timestamp_list = sorted(os.listdir(self.origin_base_path))\n\n # cpu gpu\n if 
torch.cuda.is_available():\n print(\"GPU\")\n device = torch.device(\"cuda: 0\")\n else:\n print(\"CPU\")\n device = torch.device(\"cpu\")\n self.set_thread()\n self.device = device\n\n self.model = model\n self.loss = loss\n self.max_time_num = max_time_num\n\n check_and_make_path(self.embedding_base_path)\n check_and_make_path(self.model_base_path)\n\n def set_thread(self, thread_num=None):\n if thread_num is None:\n thread_num = os.cpu_count() - 4\n torch.set_num_threads(thread_num)\n\nclass SupervisedEmbedding(BaseEmbedding):\n def __init__(self, base_path, origin_folder, embedding_folder, node_list, model, loss, classifier, max_time_num, model_folder=\"model\"):\n super(SupervisedEmbedding, self).__init__(base_path, origin_folder, embedding_folder, node_list, model, loss, max_time_num, model_folder=model_folder)\n self.classifier = classifier\n\n def learn_embedding(self, adj_list, x_list, label_list, single_output=True, epoch=50, batch_size=10240, lr=1e-3,\n start_idx=0, weight_decay=0., model_file='ctgcn', classifier_file='ctgcn_cls', embedding_type='connection', load_model=False, export=True):\n print('start learning embedding!')\n st = time.time()\n model = self.model\n classifier = self.classifier\n if load_model:\n model.load_state_dict(torch.load(os.path.join(self.model_base_path, model_file)))\n model.eval()\n classifier.load_state_dict(torch.load(os.path.join(self.model_base_path, classifier_file)))\n classifier.eval()\n\n if torch.cuda.is_available():\n model = model.to(self.device)\n classifier = classifier.to(self.device)\n torch.cuda.empty_cache()\n # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8, weight_decay=weight_decay)\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n optimizer.zero_grad()\n\n embedding_list, structure_list = [], []\n shuffled_node_idxs = np.random.permutation(np.arange(self.node_num))\n train_num = int(np.floor(self.node_num * 0.5))\n val_num = int(np.floor(self.node_num * 0.3))\n idx_train = shuffled_node_idxs[: train_num]\n label_train = label_list[idx_train]\n idx_val = shuffled_node_idxs[train_num: train_num + val_num]\n label_val= label_list[idx_val]\n idx_test = shuffled_node_idxs[train_num + val_num:]\n label_test = label_list[idx_test]\n\n batch_num = train_num // batch_size\n if train_num % batch_size != 0:\n batch_num += 1\n best_acc = 0\n # print('start training!')\n for i in range(epoch):\n node_idx_list = np.random.permutation(np.arange(train_num))\n for j in range(batch_num):\n train_node_idxs = node_idx_list[j * batch_size: min(train_num, (j + 1) * batch_size)]\n batch_node_idxs = idx_train[train_node_idxs]\n batch_labels = label_train[train_node_idxs]\n t1 = time.time()\n if single_output:\n embedding_list = model(x_list, adj_list)\n cls_list = classifier(embedding_list)\n loss_train, acc_train = self.loss(cls_list, batch_node_idxs, batch_labels, loss_type=embedding_type)\n else:\n embedding_list, structure_list = model(x_list, adj_list)\n cls_list = classifier(embedding_list)\n loss_train, acc_train = self.loss(cls_list, batch_node_idxs, batch_labels, loss_type=embedding_type, structure_list=structure_list, emb_list=embedding_list)\n loss_train.backward()\n # gradient accumulation\n if j == batch_num - 1:\n optimizer.step() # update gradient\n model.zero_grad()\n loss_val, acc_val = self.loss(cls_list, idx_val, label_val, loss_type=embedding_type, structure_list=structure_list, emb_list=embedding_list)\n print('Epoch: ' + str(i + 1), 'loss_train: {:.4f}'.format(loss_train.item()), 
'acc_train: {:.4f}'.format(acc_train.item()),\n 'loss_val: {:.4f}'.format(loss_val.item()), 'acc_val: {:.4f}'.format(acc_val.item()), 'cost time: {:.4f}s'.format(time.time() - t1))\n if acc_val > best_acc:\n best_acc = acc_val\n torch.save(model.state_dict(), os.path.join(self.model_base_path, model_file))\n torch.save(classifier.state_dict(), os.path.join(self.model_base_path, classifier_file))\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n # t2 = time.time()\n # print(\"epoch\", i + 1, ', batch num = ', j + 1, \", loss train:\", loss_train.item(), ', cost time: ', t2 - t1, ' seconds!')\n model.load_state_dict(torch.load(os.path.join(self.model_base_path, model_file)))\n model.eval()\n classifier.load_state_dict(torch.load(os.path.join(self.model_base_path, classifier_file)))\n classifier.eval()\n\n if single_output:\n embedding_list = model(x_list, adj_list)\n cls_list = classifier(embedding_list)\n loss_test, acc_test = self.loss(cls_list, idx_test, label_test, loss_type=embedding_type)\n else:\n embedding_list, structure_list = model(x_list, adj_list)\n cls_list = classifier(embedding_list)\n loss_test, acc_test = self.loss(cls_list, idx_test, label_test, loss_type=embedding_type, structure_list=structure_list, emb_list=embedding_list)\n print(\"Test set results:\", \"loss= {:.4f}\".format(loss_test.item()), \"accuracy= {:.4f}\".format(acc_test.item()))\n\n if export:\n if embedding_type == 'connection':\n #print('connection')\n output_list = embedding_list\n elif embedding_type == 'structure':\n output_list = structure_list\n else:\n raise AttributeError('Unsupported embedding type!')\n if isinstance(output_list, list):\n for i in range(len(output_list)):\n embedding = output_list[i]\n timestamp = self.timestamp_list[start_idx + i].split('.')[0]\n df_export = pd.DataFrame(data=embedding.cpu().detach().numpy(), index=self.full_node_list)\n embedding_path = os.path.join(self.embedding_base_path, timestamp + \".csv\")\n df_export.to_csv(embedding_path, sep='\\t', header=True, index=True)\n else: # tensor\n if len(output_list.size()) == 3:\n for i in range(len(output_list)):\n embedding = torch.squeeze(output_list[i, :, :], dim=0)\n timestamp = self.timestamp_list[start_idx + i].split('.')[0]\n df_export = pd.DataFrame(data=embedding.cpu().detach().numpy(), index=self.full_node_list)\n embedding_path = os.path.join(self.embedding_base_path, timestamp + \".csv\")\n df_export.to_csv(embedding_path, sep='\\t', header=True, index=True)\n else:\n timestamp = self.timestamp_list[start_idx].split('.')[0]\n df_export = pd.DataFrame(data=output_list.cpu().detach().numpy(), index=self.full_node_list)\n embedding_path = os.path.join(self.embedding_base_path, timestamp + \".csv\")\n df_export.to_csv(embedding_path, sep='\\t', header=True, index=True)\n\n del adj_list, x_list, embedding_list, model\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n else:\n gc.collect()\n en = time.time()\n print('training total time: ', en - st, ' seconds!')\n return\n\n\nclass UnsupervisedEmbedding(BaseEmbedding):\n def __init__(self, base_path, origin_folder, embedding_folder, node_list, model, loss, max_time_num, model_folder=\"model\"):\n super(UnsupervisedEmbedding, self).__init__(base_path, origin_folder, embedding_folder, node_list, model, loss, max_time_num, model_folder=model_folder)\n\n def learn_embedding(self, adj_list, x_list, single_output=True, epoch=50, batch_size=10240, lr=1e-3,\n start_idx=0, weight_decay=0., model_file='ctgcn', embedding_type='connection', load_model=False, 
export=True):\n print('start learning embedding!')\n st = time.time()\n model = self.model\n if load_model:\n model.load_state_dict(torch.load(os.path.join(self.model_base_path, model_file)))\n model.eval()\n\n if torch.cuda.is_available():\n model = model.to(self.device)\n torch.cuda.empty_cache()\n # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8, weight_decay=weight_decay)\n optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n optimizer.zero_grad()\n\n embedding_list, structure_list = [], []\n batch_num = self.node_num // batch_size\n if self.node_num % batch_size != 0:\n batch_num += 1\n # print('start training!')\n for i in range(epoch):\n node_idx_list = np.random.permutation(np.arange(self.node_num))\n for j in range(batch_num):\n batch_node_idxs = node_idx_list[j * batch_size: min(self.node_num, (j + 1) * batch_size)]\n t1 = time.time()\n if single_output:\n embedding_list = model(x_list, adj_list)\n loss = self.loss(embedding_list, batch_node_idxs, loss_type=embedding_type)\n else:\n embedding_list, structure_list= model(x_list, adj_list)\n loss = self.loss(embedding_list, batch_node_idxs, loss_type=embedding_type, structure_list=structure_list)\n loss.backward()\n # gradient accumulation\n if j == batch_num - 1:\n optimizer.step() # update gradient\n model.zero_grad()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = time.time()\n print(\"epoch\", i + 1, ', batch num = ', j + 1, \", loss:\", loss.item(), ', cost time: ', t2 - t1, ' seconds!')\n\n if export:\n if embedding_type == 'connection':\n output_list = embedding_list\n elif embedding_type == 'structure':\n output_list = structure_list\n else:\n raise AttributeError('Unsupported embedding type!')\n if isinstance(output_list, list):\n for i in range(len(output_list)):\n embedding = output_list[i]\n timestamp = self.timestamp_list[start_idx + i].split('.')[0]\n df_export = pd.DataFrame(data=embedding.cpu().detach().numpy(), index=self.full_node_list)\n embedding_path = os.path.join(self.embedding_base_path, timestamp + \".csv\")\n df_export.to_csv(embedding_path, sep='\\t', header=True, index=True)\n else: # tensor\n if len(output_list.size()) == 3:\n for i in range(len(output_list)):\n embedding = torch.squeeze(output_list[i, :, :], dim=0)\n timestamp = self.timestamp_list[start_idx + i].split('.')[0]\n df_export = pd.DataFrame(data=embedding.cpu().detach().numpy(), index=self.full_node_list)\n embedding_path = os.path.join(self.embedding_base_path, timestamp + \".csv\")\n df_export.to_csv(embedding_path, sep='\\t', header=True, index=True)\n else:\n timestamp = self.timestamp_list[start_idx].split('.')[0]\n df_export = pd.DataFrame(data=output_list.cpu().detach().numpy(), index=self.full_node_list)\n embedding_path = os.path.join(self.embedding_base_path, timestamp + \".csv\")\n df_export.to_csv(embedding_path, sep='\\t', header=True, index=True)\n\n torch.save(model.state_dict(), os.path.join(self.model_base_path, model_file))\n del adj_list, x_list, embedding_list, model\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n else:\n gc.collect()\n en = time.time()\n print('training total time: ', en - st, ' seconds!')\n return"
] |
[
[
"pandas.read_csv",
"scipy.sparse.eye",
"numpy.arange",
"scipy.sparse.load_npz",
"torch.cuda.empty_cache",
"scipy.sparse.csr_matrix",
"numpy.ones",
"numpy.random.normal",
"torch.set_num_threads",
"torch.FloatTensor",
"torch.cuda.is_available",
"numpy.floor",
"torch.device",
"numpy.array",
"torch.squeeze"
]
] |
CaptEmulation/modeldb
|
[
"78b10aca553e386554f9740db63466b1cf013a1a"
] |
[
"client/workflows/examples/custom-modules/models/nets.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as func\n\nfrom utils import preprocess # ../utils/preprocess.py\n\n\nclass FullyConnected(nn.Module):\n def __init__(self, num_features, hidden_size, dropout):\n super(FullyConnected, self).__init__()\n self.flatten = preprocess.Flatten()\n self.fc = nn.Linear(num_features, hidden_size)\n self.dropout = nn.Dropout(dropout)\n self.output = nn.Linear(hidden_size, 10)\n\n def forward(self, x):\n x = self.flatten(x)\n x = func.relu(self.fc(x))\n x = self.dropout(x)\n x = func.softmax(self.output(x), dim=-1)\n return x\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout"
]
] |
18jeffreyma/tfx
|
[
"ff6917997340401570d05a4d3ebd6e8ab5760495"
] |
[
"tfx/orchestration/portable/python_driver_operator_test.py"
] |
[
"# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.orchestration.portable.python_driver_operator.\"\"\"\n\nfrom typing import Any, Dict, List, Text\n\nimport tensorflow as tf\nfrom tfx import types\nfrom tfx.orchestration.portable import base_driver\nfrom tfx.orchestration.portable import python_driver_operator\nfrom tfx.proto.orchestration import driver_output_pb2\nfrom tfx.proto.orchestration import local_deployment_config_pb2\n\n_DEFAULT_DRIVER_OUTPUT = driver_output_pb2.DriverOutput()\n\n\nclass _FakeNoopDriver(base_driver.BaseDriver):\n\n def run(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]) -> driver_output_pb2.DriverOutput:\n return _DEFAULT_DRIVER_OUTPUT\n\n\nclass PythonDriverOperatorTest(tf.test.TestCase):\n\n def succeed(self):\n custom_driver_spec = (\n local_deployment_config_pb2.ExecutableSpec.PythonClassExecutableSpec())\n custom_driver_spec.class_path = 'tfx.orchestration.portable.python_driver_operator._FakeNoopDriver'\n driver_operator = python_driver_operator.PythonDriverOperator(\n custom_driver_spec, None, None, None)\n driver_output = driver_operator.run_driver(None, None, None)\n self.assertEqual(driver_output, _DEFAULT_DRIVER_OUTPUT)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.test.main"
]
] |
glyg/magicgui
|
[
"53bbf622975e747f9609a01a501b5cff5c95b895"
] |
[
"magicgui/backends/_qtpy/widgets.py"
] |
[
"\"\"\"Widget implementations (adaptors) for the Qt backend.\"\"\"\nfrom __future__ import annotations\n\nimport math\nfrom typing import TYPE_CHECKING, Any, Iterable, Sequence\n\nimport qtpy\nfrom qtpy import QtWidgets as QtW\nfrom qtpy.QtCore import QEvent, QObject, Qt, Signal\nfrom qtpy.QtGui import (\n QFont,\n QFontMetrics,\n QImage,\n QKeyEvent,\n QPixmap,\n QResizeEvent,\n QTextDocument,\n)\n\nfrom magicgui.types import FileDialogMode\nfrom magicgui.widgets import _protocols\nfrom magicgui.widgets._bases import Widget\n\nif TYPE_CHECKING:\n import numpy as np\n\n\nclass EventFilter(QObject):\n parentChanged = Signal()\n valueChanged = Signal(object)\n\n def eventFilter(self, obj: QObject, event: QEvent):\n if event.type() == QEvent.ParentChange:\n self.parentChanged.emit()\n return False\n\n\nclass QBaseWidget(_protocols.WidgetProtocol):\n \"\"\"Implements show/hide/native.\"\"\"\n\n _qwidget: QtW.QWidget\n\n def __init__(self, qwidg: QtW.QWidget):\n self._qwidget = qwidg()\n self._qwidget.setObjectName(f\"magicgui.{qwidg.__name__}\")\n self._event_filter = EventFilter()\n self._qwidget.installEventFilter(self._event_filter)\n\n def _mgui_close_widget(self):\n self._qwidget.close()\n\n def _mgui_get_visible(self):\n return self._qwidget.isVisible()\n\n def _mgui_set_visible(self, value: bool):\n self._qwidget.setVisible(value)\n\n def _mgui_get_enabled(self) -> bool:\n return self._qwidget.isEnabled()\n\n def _mgui_set_enabled(self, enabled: bool):\n self._qwidget.setEnabled(enabled)\n\n # TODO: this used to return _magic_widget ... figure out what we should be returning\n def _mgui_get_parent(self):\n return self._qwidget.parent()\n\n def _mgui_set_parent(self, widget: Widget):\n self._qwidget.setParent(widget.native if widget else None)\n\n def _mgui_get_native_widget(self) -> QtW.QWidget:\n return self._qwidget\n\n def _mgui_get_width(self) -> int:\n \"\"\"Return the current width of the widget.\"\"\"\n return self._qwidget.width()\n\n def _mgui_set_width(self, value: int) -> None:\n \"\"\"Set the current width of the widget.\"\"\"\n self._qwidget.resize(value, self._qwidget.height())\n\n def _mgui_get_min_width(self) -> int:\n \"\"\"Get the minimum allowable width of the widget.\"\"\"\n return self._qwidget.minimumWidth()\n\n def _mgui_set_min_width(self, value: int) -> None:\n \"\"\"Set the minimum allowable width of the widget.\"\"\"\n self._qwidget.setMinimumWidth(value)\n self._qwidget.resize(self._qwidget.sizeHint())\n\n def _mgui_get_max_width(self) -> int:\n \"\"\"Get the maximum allowable width of the widget.\"\"\"\n return self._qwidget.maximumWidth()\n\n def _mgui_set_max_width(self, value: int) -> None:\n \"\"\"Set the maximum allowable width of the widget.\"\"\"\n self._qwidget.setMaximumWidth(value)\n self._qwidget.resize(self._qwidget.sizeHint())\n\n def _mgui_get_height(self) -> int:\n \"\"\"Return the current height of the widget.\"\"\"\n return self._qwidget.height()\n\n def _mgui_set_height(self, value: int) -> None:\n \"\"\"Set the current height of the widget.\"\"\"\n self._qwidget.resize(self._qwidget.width(), value)\n\n def _mgui_get_min_height(self) -> int:\n \"\"\"Get the minimum allowable height of the widget.\"\"\"\n return self._qwidget.minimumHeight()\n\n def _mgui_set_min_height(self, value: int) -> None:\n \"\"\"Set the minimum allowable height of the widget.\"\"\"\n self._qwidget.setMinimumHeight(value)\n self._qwidget.resize(self._qwidget.sizeHint())\n\n def _mgui_get_max_height(self) -> int:\n \"\"\"Get the maximum allowable height of the 
widget.\"\"\"\n return self._qwidget.maximumHeight()\n\n def _mgui_set_max_height(self, value: int) -> None:\n \"\"\"Set the maximum allowable height of the widget.\"\"\"\n self._qwidget.setMaximumHeight(value)\n self._qwidget.resize(self._qwidget.sizeHint())\n\n def _mgui_get_tooltip(self) -> str:\n return self._qwidget.toolTip()\n\n def _mgui_set_tooltip(self, value: str | None) -> None:\n self._qwidget.setToolTip(str(value) if value else None)\n\n def _mgui_bind_parent_change_callback(self, callback):\n self._event_filter.parentChanged.connect(callback)\n\n def _mgui_render(self):\n try:\n import numpy as np\n except ImportError:\n raise ModuleNotFoundError(\n \"could not find module 'numpy'. \"\n \"Please `pip install numpy` to render widgets.\"\n ) from None\n\n img = self._qwidget.grab().toImage()\n bits = img.constBits()\n h, w, c = img.height(), img.width(), 4\n if qtpy.API_NAME == \"PySide2\":\n arr = np.array(bits).reshape(h, w, c)\n else:\n bits.setsize(h * w * c)\n arr = np.frombuffer(bits, np.uint8).reshape(h, w, c)\n\n return arr[:, :, [2, 1, 0, 3]]\n\n\nclass QBaseValueWidget(QBaseWidget, _protocols.ValueWidgetProtocol):\n \"\"\"Implements get/set/bind_change.\"\"\"\n\n def __init__(self, qwidg: QtW.QWidget, getter: str, setter: str, onchange: str):\n super().__init__(qwidg)\n self._getter_name = getter\n self._setter_name = setter\n self._onchange_name = onchange\n\n def _mgui_get_value(self) -> Any:\n val = getattr(self._qwidget, self._getter_name)()\n return self._post_get_hook(val)\n\n def _mgui_set_value(self, value) -> None:\n val = self._pre_set_hook(value)\n getattr(self._qwidget, self._setter_name)(val)\n\n def _mgui_bind_change_callback(self, callback):\n signal_instance = getattr(self._qwidget, self._onchange_name, None)\n if signal_instance:\n signal_instance.connect(callback)\n\n def _post_get_hook(self, value):\n return value\n\n def _pre_set_hook(self, value):\n return value\n\n\n# BASE WIDGET\n\n\nclass EmptyWidget(QBaseWidget):\n def __init__(self):\n super().__init__(QtW.QWidget)\n\n def _mgui_get_value(self) -> Any:\n raise NotImplementedError()\n\n def _mgui_set_value(self, value) -> None:\n raise NotImplementedError()\n\n def _mgui_bind_change_callback(self, callback):\n pass\n\n\n# STRING WIDGETS\n\n\nclass QBaseStringWidget(QBaseValueWidget):\n _qwidget: QtW.QLineEdit | QtW.QTextEdit | QtW.QLabel\n\n def _mgui_set_value(self, value) -> None:\n super()._mgui_set_value(str(value))\n\n\nclass Label(QBaseStringWidget):\n _qwidget: QtW.QLabel\n\n def __init__(self):\n super().__init__(QtW.QLabel, \"text\", \"setText\", \"\")\n self._qwidget.setSizePolicy(QtW.QSizePolicy.Fixed, QtW.QSizePolicy.Fixed)\n\n def _mgui_bind_change_callback(self, callback):\n # raise NotImplementedError(\"QLabel has no change signal\")\n pass\n\n def _mgui_set_value(self, value) -> None:\n super()._mgui_set_value(str(value))\n\n\nclass _ResizeableLabel(QtW.QLabel):\n resized = Signal()\n\n def resizeEvent(self, a0: QResizeEvent) -> None:\n self.resized.emit()\n return super().resizeEvent(a0)\n\n\nclass Image(QBaseValueWidget):\n _qwidget: _ResizeableLabel\n\n def __init__(self):\n super().__init__(_ResizeableLabel, \"text\", \"setText\", \"\")\n self._qwidget.setSizePolicy(QtW.QSizePolicy.Ignored, QtW.QSizePolicy.Ignored)\n self._qwidget.resized.connect(self._rescale)\n self._pixmap: QPixmap = None\n\n def _rescale(self):\n if self._pixmap:\n sz = self._qwidget.size()\n self._qwidget.setPixmap(\n self._pixmap.scaled(sz, Qt.KeepAspectRatio, Qt.SmoothTransformation)\n )\n\n def 
_mgui_set_value(self, val: np.ndarray) -> None:\n image = QImage(val, val.shape[1], val.shape[0], QImage.Format_RGBA8888)\n self._pixmap = QPixmap.fromImage(image)\n self._rescale()\n\n\nclass LineEdit(QBaseStringWidget):\n _qwidget: QtW.QLineEdit\n\n def __init__(self):\n super().__init__(QtW.QLineEdit, \"text\", \"setText\", \"textChanged\")\n\n\nclass LiteralEvalLineEdit(QBaseStringWidget):\n _qwidget: QtW.QLineEdit\n\n def __init__(self):\n super().__init__(QtW.QLineEdit, \"text\", \"setText\", \"textChanged\")\n\n def _post_get_hook(self, value):\n from ast import literal_eval\n\n return literal_eval(value)\n\n\nclass TextEdit(QBaseStringWidget, _protocols.SupportsReadOnly):\n def __init__(self):\n super().__init__(QtW.QTextEdit, \"toPlainText\", \"setText\", \"textChanged\")\n\n def _mgui_set_read_only(self, value: bool) -> None:\n self._qwidget.setReadOnly(value)\n\n def _mgui_get_read_only(self) -> bool:\n return self._qwidget.isReadOnly()\n\n\n# NUMBERS\n\n\nclass QBaseRangedWidget(QBaseValueWidget, _protocols.RangedWidgetProtocol):\n \"\"\"Provides min/max/step implementations.\"\"\"\n\n _qwidget: QtW.QDoubleSpinBox | QtW.QSpinBox | QtW.QSlider\n _precision: float = 1\n\n def __init__(self, qwidg):\n super().__init__(qwidg, \"value\", \"setValue\", \"valueChanged\")\n\n def _mgui_get_min(self) -> float:\n \"\"\"Get the minimum possible value.\"\"\"\n val = self._qwidget.minimum()\n return self._post_get_hook(val)\n\n def _mgui_set_min(self, value: float):\n \"\"\"Set the minimum possible value.\"\"\"\n self._update_precision(minimum=value)\n val = self._pre_set_hook(value)\n self._qwidget.setMinimum(val)\n\n def _mgui_get_max(self) -> float:\n \"\"\"Set the maximum possible value.\"\"\"\n val = self._qwidget.maximum()\n return self._post_get_hook(val)\n\n def _mgui_set_max(self, value: float):\n \"\"\"Set the maximum possible value.\"\"\"\n self._update_precision(maximum=value)\n val = self._pre_set_hook(value)\n self._qwidget.setMaximum(val)\n\n def _mgui_get_step(self) -> float:\n \"\"\"Get the step size.\"\"\"\n val = self._qwidget.singleStep()\n return self._post_get_hook(val)\n\n def _mgui_set_step(self, value: float):\n \"\"\"Set the step size.\"\"\"\n self._update_precision(step=value)\n val = self._pre_set_hook(value)\n self._qwidget.setSingleStep(val)\n\n def _update_precision(self, **kwargs):\n pass\n\n\n# BUTTONS\n\n\nclass QBaseButtonWidget(QBaseValueWidget, _protocols.SupportsText):\n _qwidget: QtW.QCheckBox | QtW.QPushButton | QtW.QRadioButton | QtW.QToolButton\n\n def __init__(self, qwidg):\n super().__init__(qwidg, \"isChecked\", \"setChecked\", \"toggled\")\n\n def _mgui_set_text(self, value: str) -> None:\n \"\"\"Set text.\"\"\"\n self._qwidget.setText(str(value))\n\n def _mgui_get_text(self) -> str:\n \"\"\"Get text.\"\"\"\n return self._qwidget.text()\n\n\nclass PushButton(QBaseButtonWidget):\n def __init__(self):\n QBaseValueWidget.__init__(\n self, QtW.QPushButton, \"isChecked\", \"setChecked\", \"clicked\"\n )\n\n\nclass CheckBox(QBaseButtonWidget):\n def __init__(self):\n super().__init__(QtW.QCheckBox)\n\n\nclass RadioButton(QBaseButtonWidget):\n def __init__(self):\n super().__init__(QtW.QRadioButton)\n\n\n# class ToolButton(QBaseButtonWidget):\n# def __init__(self):\n# super().__init__(QtW.QToolButton)\n\n\nclass Container(\n QBaseWidget, _protocols.ContainerProtocol, _protocols.SupportsOrientation\n):\n def __init__(self, layout=\"vertical\"):\n QBaseWidget.__init__(self, QtW.QWidget)\n if layout == \"horizontal\":\n self._layout: QtW.QLayout = 
QtW.QHBoxLayout()\n else:\n self._layout = QtW.QVBoxLayout()\n self._qwidget.setLayout(self._layout)\n\n def _mgui_insert_widget(self, position: int, widget: Widget):\n self._layout.insertWidget(position, widget.native)\n\n def _mgui_remove_widget(self, widget: Widget):\n self._layout.removeWidget(widget.native)\n widget.native.setParent(None)\n\n def _mgui_get_margins(self) -> tuple[int, int, int, int]:\n m = self._layout.contentsMargins()\n return m.left(), m.top(), m.right(), m.bottom()\n\n def _mgui_set_margins(self, margins: tuple[int, int, int, int]) -> None:\n self._layout.setContentsMargins(*margins)\n\n def _mgui_set_orientation(self, value) -> None:\n \"\"\"Set orientation, value will be 'horizontal' or 'vertical'.\"\"\"\n raise NotImplementedError(\n \"Sorry, changing orientation after instantiation \"\n \"is not yet implemented for Qt.\"\n )\n\n def _mgui_get_orientation(self) -> str:\n \"\"\"Set orientation, return either 'horizontal' or 'vertical'.\"\"\"\n if isinstance(self, QtW.QHBoxLayout):\n return \"horizontal\"\n else:\n return \"vertical\"\n\n\nclass MainWindow(Container):\n def __init__(self, layout=\"vertical\"):\n super().__init__(layout=layout)\n self._main_window = QtW.QMainWindow()\n self._main_window.setCentralWidget(self._qwidget)\n self._main_menu = self._main_window.menuBar()\n self._menus: dict[str, QtW.QMenu] = {}\n\n def _mgui_get_visible(self):\n return self._main_window.isVisible()\n\n def _mgui_set_visible(self, value: bool):\n self._main_window.setVisible(value)\n\n def _mgui_get_native_widget(self) -> QtW.QMainWindow:\n return self._main_window\n\n def _mgui_create_menu_item(\n self, menu_name: str, action_name: str, callback=None, shortcut=None\n ):\n menu = self._menus.setdefault(\n menu_name, self._main_menu.addMenu(f\"&{menu_name}\")\n )\n action = QtW.QAction(action_name, self._main_window)\n if shortcut is not None:\n action.setShortcut(shortcut)\n if callback is not None:\n action.triggered.connect(callback)\n menu.addAction(action)\n\n\nclass SpinBox(QBaseRangedWidget):\n def __init__(self):\n super().__init__(QtW.QSpinBox)\n\n def _mgui_set_value(self, value) -> None:\n super()._mgui_set_value(int(value))\n\n\nclass FloatSpinBox(QBaseRangedWidget):\n def __init__(self):\n super().__init__(QtW.QDoubleSpinBox)\n\n def _mgui_set_value(self, value) -> None:\n super()._mgui_set_value(float(value))\n\n def _mgui_set_step(self, value: float):\n \"\"\"Set the step size.\"\"\"\n if value and value < 1 * 10 ** -self._qwidget.decimals():\n self._qwidget.setDecimals(math.ceil(abs(math.log10(value))))\n self._qwidget.setSingleStep(value)\n\n\nclass _Slider(QBaseRangedWidget, _protocols.SupportsOrientation):\n _qwidget: QtW.QSlider\n\n def __init__(self, qwidg=QtW.QSlider, **kwargs):\n super().__init__(qwidg)\n self._mgui_set_orientation(\"horizontal\")\n self._mgui_set_readout_visibility(kwargs.get(\"readout\", True))\n self._mgui_set_orientation(kwargs.get(\"orientation\", \"horizontal\"))\n\n def _mgui_set_orientation(self, value) -> Any:\n \"\"\"Get current value of the widget.\"\"\"\n orientation = Qt.Vertical if value == \"vertical\" else Qt.Horizontal\n self._qwidget.setOrientation(orientation)\n\n def _mgui_get_orientation(self) -> Any:\n \"\"\"Get current value of the widget.\"\"\"\n orientation = self._qwidget.orientation()\n return \"vertical\" if orientation == Qt.Vertical else \"horizontal\"\n\n def _mgui_set_readout_visibility(self, value: bool):\n raise NotImplementedError()\n\n def _mgui_get_tracking(self) -> bool:\n # Progressbar also uses 
this base, but doesn't have tracking\n if hasattr(self._qwidget, \"hasTracking\"):\n return self._qwidget.hasTracking()\n return False\n\n def _mgui_set_tracking(self, value: bool) -> None:\n # Progressbar also uses this base, but doesn't have tracking\n if hasattr(self._qwidget, \"setTracking\"):\n self._qwidget.setTracking(value)\n\n\nclass Slider(_Slider):\n _qwidget: QtW.QSlider\n _readout = QtW.QSpinBox\n\n def __init__(self, qwidg=QtW.QSlider, **kwargs):\n self._container = QtW.QWidget()\n self._readout_widget = self._readout()\n super().__init__(qwidg)\n\n self._readout_widget.setButtonSymbols(self._readout_widget.NoButtons)\n self._readout_widget.setStyleSheet(\"background:transparent; border: 0;\")\n\n self._qwidget.valueChanged.connect(self._on_slider_change)\n self._readout_widget.editingFinished.connect(self._on_readout_change)\n\n def _mgui_set_orientation(self, value: str) -> None:\n \"\"\"Set orientation, value will be 'horizontal' or 'vertical'.\"\"\"\n if value == \"vertical\":\n layout = QtW.QVBoxLayout()\n self._qwidget.setOrientation(Qt.Vertical)\n layout.addWidget(self._qwidget, alignment=Qt.AlignHCenter)\n layout.addWidget(self._readout_widget, alignment=Qt.AlignHCenter)\n self._readout_widget.setAlignment(Qt.AlignCenter)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(1)\n else:\n layout = QtW.QHBoxLayout()\n self._qwidget.setOrientation(Qt.Horizontal)\n layout.addWidget(self._qwidget)\n layout.addWidget(self._readout_widget)\n self._readout_widget.setAlignment(Qt.AlignRight)\n right_margin = 0 if self._readout_widget.isVisible() else 4\n layout.setContentsMargins(0, 0, right_margin, 0)\n layout.setSpacing(4)\n old_layout = self._container.layout()\n if old_layout is not None:\n QtW.QWidget().setLayout(old_layout)\n self._container.setLayout(layout)\n\n def _on_slider_change(self, value):\n self._readout_widget.setValue(self._post_get_hook(value))\n\n def _on_readout_change(self):\n self._qwidget.setValue(self._pre_set_hook(self._readout_widget.value()))\n\n def _mgui_get_native_widget(self) -> QtW.QWidget:\n return self._container\n\n def _mgui_set_min(self, value: float):\n \"\"\"Set the minimum possible value.\"\"\"\n super()._mgui_set_min(value)\n self._readout_widget.setMinimum(value)\n\n def _mgui_set_max(self, value: float):\n \"\"\"Set the maximum possible value.\"\"\"\n super()._mgui_set_max(value)\n self._readout_widget.setMaximum(value)\n\n def _mgui_set_step(self, value: float):\n \"\"\"Set the step size.\"\"\"\n super()._mgui_set_step(value)\n self._readout_widget.setSingleStep(value)\n\n def _mgui_set_readout_visibility(self, value: bool):\n self._readout_widget.show() if value else self._readout_widget.hide()\n\n\nclass FloatSlider(Slider):\n _readout = QtW.QDoubleSpinBox\n _precision = 1e6\n\n def _update_precision(self, minimum=None, maximum=None, step=None):\n \"\"\"Called when min/max/step is changed.\n\n _precision is the factor that converts from integer representation in the slider\n widget, to the actual float representation needed.\n \"\"\"\n orig = self._precision\n\n if minimum is not None or maximum is not None:\n _min = minimum or self._mgui_get_min()\n _max = maximum or self._mgui_get_max()\n\n # make sure val * precision is within int32 overflow limit for Qt\n val = max([abs(_min), abs(_max)])\n while abs(self._precision * val) >= 2 ** 32 // 2:\n self._precision *= 0.1\n elif step:\n while step < (1 / self._precision):\n self._precision *= 10\n\n ratio = self._precision / orig\n if ratio != 1:\n 
self._mgui_set_value(self._mgui_get_value() * ratio)\n if not step:\n self._mgui_set_max(self._mgui_get_max() * ratio)\n self._mgui_set_min(self._mgui_get_min() * ratio)\n # self._mgui_set_step(self._mgui_get_step() * ratio)\n\n def _post_get_hook(self, value):\n return value / self._precision\n\n def _pre_set_hook(self, value):\n return int(value * self._precision)\n\n def _mgui_bind_change_callback(self, callback):\n def _converted_value(value):\n callback(self._post_get_hook(value))\n\n self._qwidget.valueChanged.connect(_converted_value)\n\n\nclass ProgressBar(_Slider):\n _qwidget: QtW.QProgressBar\n\n def __init__(self, **kwargs):\n super().__init__(QtW.QProgressBar)\n\n def _mgui_get_step(self) -> float:\n \"\"\"Get the step size.\"\"\"\n return 1\n\n def _mgui_set_step(self, value: float):\n \"\"\"Set the step size.\"\"\"\n\n def _mgui_set_readout_visibility(self, value: bool):\n self._qwidget.setTextVisible(value)\n\n\nclass ComboBox(QBaseValueWidget, _protocols.CategoricalWidgetProtocol):\n _qwidget: QtW.QComboBox\n\n def __init__(self):\n super().__init__(QtW.QComboBox, \"isChecked\", \"setCurrentIndex\", \"\")\n self._qwidget.currentIndexChanged.connect(self._emit_data)\n\n def _emit_data(self, index: int):\n self._event_filter.valueChanged.emit(self._qwidget.itemData(index))\n\n def _mgui_bind_change_callback(self, callback):\n self._event_filter.valueChanged.connect(callback)\n\n def _mgui_get_count(self) -> int:\n \"\"\"Return the number of items in the dropdown.\"\"\"\n return self._qwidget.count()\n\n def _mgui_get_choice(self, choice_name: str) -> Any:\n item_index = self._qwidget.findText(choice_name)\n return None if item_index == -1 else self._qwidget.itemData(item_index)\n\n def _mgui_get_current_choice(self) -> str:\n return self._qwidget.itemText(self._qwidget.currentIndex())\n\n def _mgui_get_value(self) -> Any:\n return self._qwidget.itemData(self._qwidget.currentIndex())\n\n def _mgui_set_value(self, value) -> None:\n self._qwidget.setCurrentIndex(self._qwidget.findData(value))\n\n def _mgui_set_choice(self, choice_name: str, data: Any) -> None:\n \"\"\"Set data for ``choice_name``.\"\"\"\n item_index = self._qwidget.findText(choice_name)\n # if it's not in the list, add a new item\n if item_index == -1:\n self._qwidget.addItem(choice_name, data)\n # otherwise update its data\n else:\n self._qwidget.setItemData(item_index, data)\n\n def _mgui_set_choices(self, choices: Iterable[tuple[str, Any]]) -> None:\n \"\"\"Set current items in categorical type ``widget`` to ``choices``.\"\"\"\n choices_ = list(choices)\n if not choices_:\n self._qwidget.clear()\n return\n\n choice_names = [x[0] for x in choices_]\n # remove choices that no longer exist\n for i in range(self._qwidget.count()):\n if self._qwidget.itemText(i) not in choice_names:\n self._qwidget.removeItem(i)\n # update choices\n for name, data in choices_:\n self._mgui_set_choice(name, data)\n\n # if the currently selected item is not in the new set,\n # remove it and select the first item in the list\n current = self._qwidget.itemText(self._qwidget.currentIndex())\n if current not in choice_names:\n first = choice_names[0]\n self._qwidget.setCurrentIndex(self._qwidget.findText(first))\n self._qwidget.removeItem(self._qwidget.findText(current))\n\n def _mgui_del_choice(self, choice_name: str) -> None:\n \"\"\"Delete choice_name.\"\"\"\n item_index = self._qwidget.findText(choice_name)\n if item_index >= 0:\n self._qwidget.removeItem(item_index)\n\n def _mgui_get_choices(self) -> tuple[tuple[str, Any], ...]:\n 
\"\"\"Get available choices.\"\"\"\n return tuple(\n (self._qwidget.itemText(i), self._qwidget.itemData(i))\n for i in range(self._qwidget.count())\n )\n\n\nclass Select(QBaseValueWidget, _protocols.CategoricalWidgetProtocol):\n _qwidget: QtW.QListWidget\n\n def __init__(self):\n super().__init__(QtW.QListWidget, \"isChecked\", \"setCurrentIndex\", \"\")\n self._qwidget.itemSelectionChanged.connect(self._emit_data)\n self._qwidget.setSelectionMode(QtW.QAbstractItemView.ExtendedSelection)\n\n def _emit_data(self):\n data = self._qwidget.selectedItems()\n self._event_filter.valueChanged.emit([d.data(Qt.UserRole) for d in data])\n\n def _mgui_bind_change_callback(self, callback):\n self._event_filter.valueChanged.connect(callback)\n\n def _mgui_get_count(self) -> int:\n \"\"\"Return the number of items in the dropdown.\"\"\"\n return self._qwidget.count()\n\n def _mgui_get_choice(self, choice_name: str) -> list[Any]:\n items = self._qwidget.findItems(choice_name, Qt.MatchExactly)\n return [i.data(Qt.UserRole) for i in items]\n\n def _mgui_get_current_choice(self) -> list[str]: # type: ignore[override]\n return [i.text() for i in self._qwidget.selectedItems()]\n\n def _mgui_get_value(self) -> Any:\n return [i.data(Qt.UserRole) for i in self._qwidget.selectedItems()]\n\n def _mgui_set_value(self, value) -> None:\n if not isinstance(value, (list, tuple)):\n value = [value]\n for i in range(self._qwidget.count()):\n item = self._qwidget.item(i)\n item.setSelected(item.data(Qt.UserRole) in value)\n\n def _mgui_set_choice(self, choice_name: str, data: Any) -> None:\n \"\"\"Set data for ``choice_name``.\"\"\"\n items = self._qwidget.findItems(choice_name, Qt.MatchExactly)\n # if it's not in the list, add a new item\n if not items:\n item = QtW.QListWidgetItem(choice_name)\n item.setData(Qt.UserRole, data)\n self._qwidget.addItem(item)\n # otherwise update its data\n else:\n for item in items:\n item.setData(Qt.UserRole, data)\n\n def _mgui_set_choices(self, choices: Iterable[tuple[str, Any]]) -> None:\n \"\"\"Set current items in categorical type ``widget`` to ``choices``.\"\"\"\n choices_ = list(choices)\n if not choices_:\n self._qwidget.clear()\n return\n\n choice_names = [x[0] for x in choices_]\n # remove choices that no longer exist\n for i in reversed(range(self._qwidget.count())):\n if self._qwidget.item(i).text() not in choice_names:\n self._qwidget.takeItem(i)\n # update choices\n for name, data in choices_:\n self._mgui_set_choice(name, data)\n\n def _mgui_del_choice(self, choice_name: str) -> None:\n \"\"\"Delete choice_name.\"\"\"\n for i in reversed(range(self._qwidget.count())):\n if self._qwidget.item(i).text() == choice_name:\n self._qwidget.takeItem(i)\n\n def _mgui_get_choices(self) -> tuple[tuple[str, Any], ...]:\n \"\"\"Get available choices.\"\"\"\n return tuple(\n (self._qwidget.item(i).text(), self._qwidget.item(i).data(Qt.UserRole))\n for i in range(self._qwidget.count())\n )\n\n\nclass RadioButtons(\n QBaseValueWidget,\n _protocols.CategoricalWidgetProtocol,\n _protocols.SupportsOrientation,\n):\n _qwidget: QtW.QGroupBox\n\n def __init__(self):\n super().__init__(QtW.QGroupBox, \"\", \"\", \"\")\n self._btn_group = QtW.QButtonGroup(self._qwidget)\n self._mgui_set_orientation(\"vertical\")\n self._btn_group.buttonToggled.connect(self._emit_data)\n\n def _emit_data(self, btn, checked):\n if checked:\n self._event_filter.valueChanged.emit(self._mgui_get_value())\n\n def _mgui_bind_change_callback(self, callback):\n self._event_filter.valueChanged.connect(callback)\n\n def 
_mgui_set_orientation(self, value: str) -> None:\n \"\"\"Set orientation, value will be 'horizontal' or 'vertical'.\"\"\"\n new_layout = QtW.QHBoxLayout() if value == \"horizontal\" else QtW.QVBoxLayout()\n for btn in self._btn_group.buttons():\n new_layout.addWidget(btn)\n old_layout = self._qwidget.layout()\n if old_layout is not None:\n QtW.QWidget().setLayout(old_layout)\n self._qwidget.setLayout(new_layout)\n self._qwidget.layout().setContentsMargins(4, 4, 4, 4)\n\n def _mgui_get_orientation(self) -> str:\n \"\"\"Get orientation, return either 'horizontal' or 'vertical'.\"\"\"\n if isinstance(self._qwidget.layout(), QtW.QVBoxLayout):\n return \"vertical\"\n else:\n return \"horizontal\"\n\n def _mgui_get_current_choice(self) -> str:\n btn = self._btn_group.checkedButton()\n return btn.text() if btn else None\n\n def _mgui_get_value(self) -> Any:\n btn = self._btn_group.checkedButton()\n return btn._data if btn else None\n\n def _mgui_set_value(self, value) -> None:\n for btn in self._btn_group.buttons():\n if btn._data == value:\n btn.setChecked(True)\n break # exclusive\n\n def _mgui_get_count(self) -> int:\n \"\"\"Return the number of items in the dropdown.\"\"\"\n return len(self._btn_group.buttons())\n\n def _mgui_get_choice(self, choice_name: str) -> Any:\n for btn in self._btn_group.buttons():\n if btn.text() == choice_name:\n return btn._data\n return None\n\n def _add_button(self, label: str, data: Any = None):\n btn = QtW.QRadioButton(label, self._qwidget)\n btn._data = data\n self._btn_group.addButton(btn)\n self._qwidget.layout().addWidget(btn)\n\n def _remove_button(self, btn):\n self._btn_group.removeButton(btn)\n btn.setParent(None)\n btn.deleteLater()\n\n def _mgui_set_choice(self, choice_name: str, data: Any) -> None:\n \"\"\"Set data for ``choice_name``.\"\"\"\n for btn in self._btn_group.buttons():\n if btn.text() == choice_name:\n # otherwise update its data\n btn._data = data\n else:\n # if it's not in the list, add a new item\n self._add_button(choice_name, data)\n\n def _mgui_del_choice(self, choice_name: str) -> None:\n \"\"\"Delete choice_name.\"\"\"\n for btn in self._btn_group.buttons():\n if btn.text() == choice_name:\n self._remove_button(btn)\n break\n\n def _mgui_get_choices(self) -> tuple[tuple[str, Any], ...]:\n \"\"\"Get available choices.\"\"\"\n return tuple((str(btn.text()), btn._data) for btn in self._btn_group.buttons())\n\n def _mgui_set_choices(self, choices: Iterable[tuple[str, Any]]) -> None:\n \"\"\"Set current items in categorical type ``widget`` to ``choices``.\"\"\"\n current = self._mgui_get_value()\n for btn in self._btn_group.buttons():\n self._remove_button(btn)\n\n for c in choices:\n self._add_button(*c)\n self._mgui_set_value(current)\n\n\nclass DateTimeEdit(QBaseValueWidget):\n def __init__(self):\n super().__init__(QtW.QDateTimeEdit, \"\", \"setDateTime\", \"dateTimeChanged\")\n\n def _mgui_get_value(self):\n try:\n return self._qwidget.dateTime().toPython()\n except (TypeError, AttributeError):\n return self._qwidget.dateTime().toPyDateTime()\n\n\nclass DateEdit(QBaseValueWidget):\n def __init__(self):\n super().__init__(QtW.QDateEdit, \"\", \"setDate\", \"dateChanged\")\n\n def _mgui_get_value(self):\n try:\n return self._qwidget.date().toPython()\n except (TypeError, AttributeError):\n return self._qwidget.date().toPyDate()\n\n\nclass TimeEdit(QBaseValueWidget):\n def __init__(self):\n super().__init__(QtW.QTimeEdit, \"\", \"setTime\", \"timeChanged\")\n\n def _mgui_get_value(self):\n try:\n return 
self._qwidget.time().toPython()\n except (TypeError, AttributeError):\n return self._qwidget.time().toPyTime()\n\n\nQFILE_DIALOG_MODES = {\n FileDialogMode.EXISTING_FILE: QtW.QFileDialog.getOpenFileName,\n FileDialogMode.EXISTING_FILES: QtW.QFileDialog.getOpenFileNames,\n FileDialogMode.OPTIONAL_FILE: QtW.QFileDialog.getSaveFileName,\n FileDialogMode.EXISTING_DIRECTORY: QtW.QFileDialog.getExistingDirectory,\n}\n\n\ndef show_file_dialog(\n mode: str | FileDialogMode = FileDialogMode.EXISTING_FILE,\n caption: str = None,\n start_path: str = None,\n filter: str = None,\n parent=None,\n) -> str | None:\n show_dialog = QFILE_DIALOG_MODES[FileDialogMode(mode)]\n if mode is FileDialogMode.EXISTING_DIRECTORY:\n result = show_dialog(parent, caption, start_path)\n else:\n result, _ = show_dialog(parent, caption, start_path, filter)\n return result or None\n\n\ndef get_text_width(text) -> int:\n \"\"\"Return the width required to render ``text`` (including rich text elements).\"\"\"\n if qtpy.PYSIDE2:\n from qtpy.QtGui import Qt as _Qt\n else:\n from qtpy.QtCore import Qt as _Qt\n\n if _Qt.mightBeRichText(text):\n doc = QTextDocument()\n doc.setHtml(text)\n return doc.size().width()\n else:\n fm = QFontMetrics(QFont(\"\", 0))\n return fm.boundingRect(text).width() + 5\n\n\ndef _maybefloat(item):\n if not item:\n return None\n num = item.text() if hasattr(item, \"text\") else item\n try:\n return int(num) if num.isdigit() else float(num)\n except ValueError:\n return num\n\n\nclass _QTableExtended(QtW.QTableWidget):\n _read_only: bool = False\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setItemDelegate(_ItemDelegate(parent=self))\n\n def _copy_to_clipboard(self):\n selranges = self.selectedRanges()\n if not selranges:\n return\n if len(selranges) > 1:\n import warnings\n\n warnings.warn(\n \"Multiple table selections detected: \"\n \"only the first (upper left) selection will be copied\"\n )\n\n # copy first selection range\n sel = selranges[0]\n lines = []\n for r in range(sel.topRow(), sel.bottomRow() + 1):\n cells = []\n for c in range(sel.leftColumn(), sel.rightColumn() + 1):\n item = self.item(r, c)\n cells.append(item.text()) if hasattr(item, \"text\") else \"\"\n lines.append(\"\\t\".join(cells))\n\n if lines:\n QtW.QApplication.clipboard().setText(\"\\n\".join(lines))\n\n def _paste_from_clipboard(self):\n if self._read_only:\n return\n\n sel_idx = self.selectedIndexes()\n if not sel_idx:\n return\n text = QtW.QApplication.clipboard().text()\n if not text:\n return\n\n # paste in the text\n row0, col0 = sel_idx[0].row(), sel_idx[0].column()\n data = [line.split(\"\\t\") for line in text.splitlines()]\n if (row0 + len(data)) > self.rowCount():\n self.setRowCount(row0 + len(data))\n if data and (col0 + len(data[0])) > self.columnCount():\n self.setColumnCount(col0 + len(data[0]))\n for r, line in enumerate(data):\n for c, cell in enumerate(line):\n try:\n self.item(row0 + r, col0 + c).setText(str(cell))\n except AttributeError:\n self.setItem(row0 + r, col0 + c, QtW.QTableWidgetItem(str(cell)))\n\n # select what was just pasted\n selrange = QtW.QTableWidgetSelectionRange(row0, col0, row0 + r, col0 + c)\n self.clearSelection()\n self.setRangeSelected(selrange, True)\n\n def _delete_selection(self):\n if self._read_only:\n return\n\n for item in self.selectedItems():\n try:\n item.setText(\"\")\n except AttributeError:\n pass\n\n def keyPressEvent(self, e: QKeyEvent):\n if e.modifiers() & Qt.ControlModifier and e.key() == Qt.Key_C:\n return 
self._copy_to_clipboard()\n if e.modifiers() & Qt.ControlModifier and e.key() == Qt.Key_V:\n return self._paste_from_clipboard()\n if e.modifiers() & Qt.ControlModifier and e.key() == Qt.Key_X:\n self._copy_to_clipboard()\n return self._delete_selection()\n if e.key() in (Qt.Key_Delete, Qt.Key_Backspace):\n return self._delete_selection()\n return super().keyPressEvent(e)\n\n\nclass Table(QBaseWidget, _protocols.TableWidgetProtocol):\n _qwidget: _QTableExtended\n _DATA_ROLE: int = 255\n _EDITABLE = QtW.QTableWidget.EditKeyPressed | QtW.QTableWidget.DoubleClicked\n _READ_ONLY = QtW.QTableWidget.NoEditTriggers\n\n def __init__(self):\n super().__init__(_QTableExtended)\n header = self._qwidget.horizontalHeader()\n # avoid strange AttributeError on CI\n if hasattr(header, \"setSectionResizeMode\"):\n header.setSectionResizeMode(QtW.QHeaderView.Stretch)\n # self._qwidget.horizontalHeader().setSectionsMovable(True) # tricky!!\n header.setSectionResizeMode(QtW.QHeaderView.Interactive)\n self._qwidget.itemChanged.connect(self._update_item_data_with_text)\n\n def _mgui_set_read_only(self, value: bool) -> None:\n value = bool(value)\n self._qwidget._read_only = value\n if value:\n self._qwidget.setEditTriggers(self._READ_ONLY)\n else:\n self._qwidget.setEditTriggers(self._EDITABLE)\n\n def _mgui_get_read_only(self) -> bool:\n return self._qwidget._read_only\n\n def _update_item_data_with_text(self, item: QtW.QTableWidgetItem):\n self._qwidget.blockSignals(True)\n item.setData(self._DATA_ROLE, _maybefloat(item.text()))\n self._qwidget.blockSignals(False)\n\n def _mgui_set_row_count(self, nrows: int) -> None:\n \"\"\"Set the number of rows in the table. (Create/delete as needed).\"\"\"\n self._qwidget.setRowCount(nrows)\n\n def _mgui_set_column_count(self, ncols: int) -> None:\n \"\"\"Set the number of columns in the table. (Create/delete as needed).\"\"\"\n self._qwidget.setColumnCount(ncols)\n\n def _mgui_get_column_count(self) -> int:\n return self._qwidget.columnCount()\n\n def _mgui_get_row_count(self) -> int:\n return self._qwidget.rowCount()\n\n def _mgui_remove_row(self, row: int) -> None:\n self._qwidget.removeRow(row)\n\n def _mgui_remove_column(self, column: int) -> None:\n self._qwidget.removeColumn(column)\n\n def _mgui_get_cell(self, row: int, col: int) -> Any:\n \"\"\"Get current value of the widget.\"\"\"\n item = self._qwidget.item(row, col)\n if item:\n return item.data(self._DATA_ROLE)\n widget = self._qwidget.cellWidget(row, col)\n if widget:\n return getattr(widget, \"_magic_widget\", widget)\n\n def _mgui_set_cell(self, row: int, col: int, value: Any) -> None:\n \"\"\"Set current value of the widget.\"\"\"\n if value is None:\n self._qwidget.setItem(row, col, None)\n self._qwidget.removeCellWidget(row, col)\n return\n if isinstance(value, Widget):\n self._qwidget.setCellWidget(row, col, value.native)\n return\n item = QtW.QTableWidgetItem(str(value))\n item.setData(self._DATA_ROLE, value)\n self._qwidget.setItem(row, col, item)\n\n def _mgui_get_row_headers(self) -> tuple:\n \"\"\"Get current row headers of the widget.\"\"\"\n # ... 'PySide2.QtWidgets.QTableWidgetItem' object has no attribute 'visualIndex'\n # on some linux... so removing the visualIndex() for row headers... 
this means\n # we can't enable row drag/drop without fixing this\n indices = range(self._qwidget.rowCount())\n items = (self._qwidget.verticalHeaderItem(i) for i in indices)\n headers = (item.text() for item in items if item)\n return tuple(_maybefloat(x) for x in headers)\n\n def _mgui_set_row_headers(self, headers: Sequence) -> None:\n \"\"\"Set current row headers of the widget.\"\"\"\n self._qwidget.setVerticalHeaderLabels(tuple(map(str, headers)))\n\n def _mgui_get_column_headers(self) -> tuple:\n \"\"\"Get current column headers of the widget.\"\"\"\n horiz_header = self._qwidget.horizontalHeader()\n ncols = self._qwidget.columnCount()\n # visual index allows for column drag/drop, though it's currently not enabled.\n indices = (horiz_header.visualIndex(i) for i in range(ncols))\n items = (self._qwidget.horizontalHeaderItem(i) for i in indices)\n headers = (item.text() for item in items if item)\n return tuple(_maybefloat(x) for x in headers)\n\n def _mgui_set_column_headers(self, headers: Sequence) -> None:\n \"\"\"Set current column headers of the widget.\"\"\"\n self._qwidget.setHorizontalHeaderLabels(tuple(map(str, headers)))\n\n def _mgui_bind_row_headers_change_callback(self, callback) -> None:\n \"\"\"Bind callback to row headers change event.\"\"\"\n raise NotImplementedError()\n\n def _mgui_bind_column_headers_change_callback(self, callback) -> None:\n \"\"\"Bind callback to column headers change event.\"\"\"\n raise NotImplementedError()\n\n def _mgui_bind_change_callback(self, callback):\n \"\"\"Bind callback to event of changing any cell.\"\"\"\n\n def _item_callback(item, callback=callback):\n col_head = item.tableWidget().horizontalHeaderItem(item.column())\n col_head = col_head.text() if col_head is not None else \"\"\n row_head = item.tableWidget().verticalHeaderItem(item.row())\n row_head = row_head.text() if row_head is not None else \"\"\n data = {\n \"data\": item.data(self._DATA_ROLE),\n \"row\": item.row(),\n \"column\": item.column(),\n \"column_header\": col_head,\n \"row_header\": row_head,\n }\n callback(data)\n\n self._qwidget.itemChanged.connect(_item_callback)\n\n\nclass _ItemDelegate(QtW.QStyledItemDelegate):\n \"\"\"Displays table widget items with properly formatted numbers.\"\"\"\n\n def __init__(self, *args, ndigits: int = 4, **kwargs):\n super().__init__(*args, **kwargs)\n self.ndigits = ndigits\n\n def displayText(self, value, locale):\n value = self._format_number(value)\n return super().displayText(value, locale)\n\n def _format_number(self, text: str) -> str:\n \"\"\"convert string to int or float if possible\"\"\"\n try:\n value: int | float | None = int(text)\n except ValueError:\n try:\n value = float(text)\n except ValueError:\n value = None\n\n if isinstance(value, (int, float)):\n dgt = self.ndigits\n if 0.1 <= abs(value) < 10 ** (dgt + 1) or value == 0:\n text = str(value) if isinstance(value, int) else f\"{value:.{dgt}f}\"\n else:\n text = f\"{value:.{dgt-1}e}\"\n\n return text\n"
] |
[
[
"numpy.frombuffer",
"numpy.array"
]
] |
m-rath/songs
|
[
"32f7c5a99c17e87f299dee64990b5467770c56fc"
] |
[
"song_suggester/planB_funcs.py"
] |
[
"\"\"\"some objects and functions supporting html actions and routes\"\"\"\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib as plt \nfrom scipy.spatial import distance\n# from sqlalchemy import func, distinct\n# from sqlalchemy.sql import expression #(.exists, .select, ...)\nfrom .spotify_client import *\n\n\n# unpickle a trained kmeans algorithm and cluster-distances \npickled_kmeans = r\"kmeans_pipe.sav\"\npickled_index = r\"df_index.sav\"\npickled_locs = r\"song_space_locs.sav\"\nkmeans_pipe = pickle.load(open(pickled_kmeans, 'rb'))\ndf_index = pickle.load(open(pickled_index, 'rb'))\nsong_space_locs = pickle.load(open(pickled_locs, 'rb'))\n\n\ndef suggest_ids(song_name, artist_name, count=100):\n \"\"\"Compares a track to ~440,000 others, based on 13 numeric audio features, and returns the spotify_ids of 35 songs with similar cluster-distance coordinates; it would be cool if a button press would display the next closest set. It would be cooler if matplotlib displayed a 3D plot, with 3 drop-down menus for choosing any 3 features (of 13) for plot axes (or a 3D tSNE plot, not with audio features but with projections to abstract 3D space); and if the color of input song were bright color, similar to neighbors displayed in table, but different from the faded grey others\"\"\"\n song_id, artist_id = retrieve_spotify_ids(song_name, artist_name)\n \n features = retrieve_audio_features(song_id) \n feats = ['danceability','energy','key','loudness','mode','speechiness','acousticness','instrumentalness','liveness','valence','tempo','duration_ms','time_signature']\n\n model_input = [features[0][feat] for feat in feats]\n song_space_base = kmeans_pipe.transform([model_input])\n dists = distance.cdist(song_space_locs, song_space_base, 'cityblock')\n dists = pd.DataFrame(dists, index=df_index).sort_values(by=0)[:count] #top10\n spotify_ids = dists.index\n\n return spotify_ids\n\n\ndef relevant_genres(tracks):\n artist_names = [track.artist_name for track in tracks]\n genres_nested_lists = [retrieve_genres(art) for art in artist_names]\n genre_list = [g for g_list in genres_nested_lists for g in g_list]\n\n return genre_list\n"
] |
[
[
"scipy.spatial.distance.cdist",
"pandas.DataFrame"
]
] |