repo_name (string, length 6-130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
danruod/FS-DGPM |
[
"8cc57aed8caf0f8b3c1c02db0b88895d7ed3d7b5"
] |
[
"model/fsdgpm.py"
] |
[
"import numpy as np\nimport torch\nimport math\nfrom model.base import *\n\nclass Net(BaseNet):\n def __init__(self, n_inputs, n_outputs, n_tasks, args):\n super(Net, self).__init__(n_inputs, n_outputs, n_tasks, args)\n\n # steps for sharpness\n self.inner_steps = args.inner_batches\n\n # eta1: update step size of weight perturbation\n self.eta1 = args.eta1\n\n # eta2: learning rate of lambda(soft weight for basis)\n self.eta2 = args.eta2\n\n\n def forward(self, x, t):\n output = self.net.forward(x)\n\n if self.net.multi_head:\n # make sure we predict classes within the current task\n offset1, offset2 = self.compute_offsets(t)\n if offset1 > 0:\n output[:, :offset1].data.fill_(-10e10)\n if offset2 < self.n_outputs:\n output[:, offset2:self.n_outputs].data.fill_(-10e10)\n\n return output\n\n def observe(self, x, y, t):\n if t != self.current_task:\n self.current_task = t\n\n for pass_itr in range(self.glances):\n self.iter += 1\n self.zero_grads()\n\n perm = torch.randperm(x.size(0))\n x = x[perm]\n y = y[perm]\n\n # get a batch by augmented incoming data with old task data, used for computing tiny-loss\n bx, by, bt = self.get_batch(x, y, t)\n\n # inner step of sharpness\n fast_weights = None\n inner_sz = math.ceil(len(x) / self.inner_steps)\n meta_losses = torch.zeros(self.inner_steps).float()\n k = 0\n\n for j in range(0, len(x), inner_sz):\n if j + inner_sz <= len(x):\n batch_x = x[j: j + inner_sz]\n batch_y = y[j: j + inner_sz]\n else:\n batch_x = x[j:]\n batch_y = y[j:]\n\n # samples for sharpness/look-ahead are from the current task\n fast_weights = self.update_weight(batch_x, batch_y, t, fast_weights)\n\n # samples for weight/lambdas update are from the current task and old tasks\n meta_losses[k] = self.meta_loss(bx, by, bt, fast_weights)\n k += 1\n\n # Taking the gradient step\n with torch.autograd.set_detect_anomaly(True):\n self.zero_grads()\n loss = torch.mean(meta_losses)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)\n\n if len(self.M_vec) > 0:\n\n # update the lambdas\n if self.args.method in ['dgpm', 'xdgpm']:\n torch.nn.utils.clip_grad_norm_(self.lambdas.parameters(), self.args.grad_clip_norm)\n if self.args.sharpness:\n self.opt_lamdas.step()\n else:\n self.opt_lamdas_step()\n\n for idx in range(len(self.lambdas)):\n self.lambdas[idx] = nn.Parameter(torch.sigmoid(self.args.tmp * self.lambdas[idx]))\n\n # only use updated lambdas to update weight\n if self.args.method == 'dgpm':\n self.net.zero_grad()\n loss = self.meta_loss(bx, by, bt) # Forward without weight perturbation\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)\n\n # train on the rest of subspace spanned by GPM\n self.train_restgpm()\n # torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.grad_clip_norm)\n self.optimizer.step()\n\n else:\n self.optimizer.step()\n\n self.zero_grads()\n\n # only sample and push to replay buffer once for each task's stream\n # instead of pushing every epoch\n if self.real_epoch == 0:\n self.push_to_mem(x, y, torch.tensor(t))\n\n return loss\n\n\n def update_weight(self, x, y, t, fast_weights):\n \"\"\"\n Add weight perturbation on the important subspace spanned by GPM for sharpness or look-ahead\n \"\"\"\n loss = self.take_loss(x, y, t, fast_weights)\n if fast_weights is None:\n fast_weights = self.net.get_params()\n\n # NOTE if we want higher order grads to be allowed, change create_graph=False to True\n graph_required = self.args.second_order\n grads = 
list(torch.autograd.grad(loss, fast_weights, create_graph=graph_required, retain_graph=graph_required,\n allow_unused=True))\n\n # Get the projection of grads on the subspace spanned by GPM\n if len(self.M_vec) > 0:\n grads = self.grad_projection(grads)\n\n for i in range(len(grads)):\n if grads[i] is not None:\n grads[i] = torch.clamp(grads[i], min=-self.args.grad_clip_norm, max=self.args.grad_clip_norm)\n\n if self.args.sharpness:\n fast_weights = list(\n map(lambda p: p[1] + p[0] * self.eta1 if p[0] is not None else p[1], zip(grads, fast_weights)))\n else:\n fast_weights = list(\n map(lambda p: p[1] - p[0] * self.eta1 if p[0] is not None else p[1], zip(grads, fast_weights)))\n\n return fast_weights\n\n\n def grad_projection(self, grads):\n \"\"\"\n get the projection of grads on the subspace spanned by GPM\n \"\"\"\n j = 0\n for i in range(len(grads)):\n # only update conv weight and fc weight\n # ignore perturbations with 1 dimension (e.g. BN, bias)\n if grads[i] is None:\n continue\n if grads[i].ndim <= 1:\n continue\n if j < len(self.M_vec):\n if self.args.method in ['dgpm', 'xdgpm']:\n # lambdas = torch.sigmoid(self.args.tmp * self.lambdas[j]).reshape(-1)\n lambdas = self.lambdas[j]\n else:\n lambdas = torch.ones(self.M_vec[j].shape[1])\n\n if self.cuda:\n self.M_vec[j] = self.M_vec[j].cuda()\n lambdas = lambdas.cuda()\n\n if grads[i].ndim == 4:\n # rep[i]: n_samples * n_features\n grad = grads[i].reshape(grads[i].shape[0], -1)\n grad = torch.mm(torch.mm(torch.mm(grad, self.M_vec[j]), torch.diag(lambdas)), self.M_vec[j].T)\n grads[i] = grad.reshape(grads[i].shape).clone()\n else:\n grads[i] = torch.mm(torch.mm(torch.mm(grads[i], self.M_vec[j]), torch.diag(lambdas)), self.M_vec[j].T)\n\n j += 1\n\n return grads\n\n def zero_grads(self):\n self.optimizer.zero_grad()\n self.net.zero_grad()\n if len(self.M_vec) > 0 and self.args.method in ['dgpm', 'xdgpm']:\n self.lambdas.zero_grad()\n\n def define_lambda_params(self):\n assert len(self.M_vec) > 0\n\n # Setup learning parameters\n self.lambdas = nn.ParameterList([])\n for i in range(len(self.M_vec)):\n self.lambdas.append(nn.Parameter(self.args.lam_init * torch.ones((self.M_vec[i].shape[1]), requires_grad=True)))\n\n if self.cuda:\n self.lambdas = self.lambdas.cuda()\n\n return\n\n def update_opt_lambda(self, lr=None):\n if lr is None:\n lr = self.eta2\n self.opt_lamdas = torch.optim.SGD(list(self.lambdas.parameters()), lr=lr, momentum=self.args.momentum)\n\n return\n\n def opt_lamdas_step(self):\n \"\"\"\n Performs a single optimization step, but change gradient descent to ascent\n \"\"\"\n for group in self.opt_lamdas.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n if momentum != 0:\n param_state = self.opt_lamdas.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n if nesterov:\n d_p = d_p.add(buf, alpha=momentum)\n else:\n d_p = buf\n\n p.data = (p.data + group['lr'] * d_p).clone()\n\n return\n\n"
] |
[
[
"torch.mean",
"torch.sigmoid",
"torch.mm",
"torch.ones",
"torch.autograd.set_detect_anomaly",
"torch.zeros",
"torch.clone",
"torch.tensor",
"torch.diag",
"torch.clamp",
"torch.autograd.grad"
]
] |
asvskartheek/sentiment-analysis |
[
"9bdc531e31e9b6885eab235ba23d542834bb379e"
] |
[
"pretrained.py"
] |
[
"import argparse\nimport pickle\n\nimport torch\n\nfrom models import *\nimport spacy\n\nfrom utils import count_parameters\n\nnlp = spacy.load(\"en\")\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n \"--model\", default=\"fast\", type=str, help=\"pre-trained model architecture\"\n)\n\nargs = parser.parse_args()\n\n\ndef predict_sentiment(sentence):\n tokenized = [tok.text for tok in nlp.tokenizer(sentence)]\n indexed = [text_vocab.stoi[t] for t in tokenized]\n length = [len(indexed)]\n tensor = torch.LongTensor(indexed)\n tensor = tensor.unsqueeze(1)\n length_tensor = torch.LongTensor(length)\n packed_info = (tensor, length_tensor)\n prediction = torch.sigmoid(model(packed_info))\n pred = prediction.item()\n print(\"Score: \", pred)\n xd = int(pred > 0.5)\n return label_vocab.itos[xd]\n\n\nif __name__ == \"__main__\":\n model_arch = args.model\n\n pretrained_folder = \"pretrained/\" + model_arch + \"/\"\n ckpt = pretrained_folder + \"checkpoints/trained.ckpt\"\n if model_arch == \"simple\":\n model = SimpleRNNClassifier.load_from_checkpoint(\n ckpt, map_location=torch.device(\"cpu\")\n )\n elif model_arch == \"birnn\":\n model = BiLSTMClassifier.load_from_checkpoint(\n ckpt, map_location=torch.device(\"cpu\")\n )\n elif model_arch == \"fast\":\n model = FastClassifier.load_from_checkpoint(\n ckpt, map_location=torch.device(\"cpu\")\n )\n elif model_arch == \"cnn\":\n model = CNNClassifier.load_from_checkpoint(\n ckpt, map_location=torch.device(\"cpu\")\n )\n else:\n raise ValueError(\"The model doesn't exist, use | simple | birnn | fast | cnn |\")\n exit(1)\n\n print(\"Parameters\")\n print(\"----------\")\n print(count_parameters(model))\n print(\"----------\")\n\n print(\"Loading Vocabulary...\")\n with open(pretrained_folder + \"text.pkl\", \"rb\") as f:\n text_vocab = pickle.load(f)\n with open(pretrained_folder + \"label.pkl\", \"rb\") as f:\n label_vocab = pickle.load(f)\n\n while True:\n text = input(\"Enter Text..\\n\")\n print(predict_sentiment(text))\n"
] |
[
[
"torch.device",
"torch.LongTensor"
]
] |
liannah/credit_default_prediction |
[
"10a372b9524d726c2d25e6b59fe91e4df1a18b22"
] |
[
"src/clean_split_data.py"
] |
[
"#!/usr/bin/env python\n\n# Author: Taiwo Owoseni\n# date: 2021-11-25\n\n\"\"\"\nCleans and splits raw data into train and test data set and save to file path as csv file.\n\nUsage: src/clean_split_data.py --input_path=<input_path> --out_dir=<out_dir>\n\nOptions:\n--input_path=<input_path> Path (file path) to raw data (script supports only csv)\n--out_dir=<out_dir> Path (directory) to save transformed train and test data\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom docopt import docopt\nfrom sklearn.model_selection import train_test_split\n\nopt = docopt(__doc__)\n\ndef save_file(path_dir, file_name, processed_data): \n \"\"\"\n Saves file.\n\n This function creates a new file by\n saving it to the specified path.\n\n Parameters\n ----------\n path_dir : str\n The path to save the file.\n file_name: str\n The file name of the document.\n processed_data: pd.DataFrame\n The object to be saved.\n\n Examples\n --------\n save_file('data/split', 'train_data', train_df)\n \"\"\"\n \n file_path = os.path.join(path_dir, file_name)\n try:\n processed_data.to_csv(file_path, index = False, encoding='utf-8')\n except:\n os.makedirs(os.path.dirname(file_path))\n processed_data.to_csv(file_path, index = False, encoding='utf-8')\n \ndef read_data(file_path):\n \"\"\"\n Reads a csv file from path.\n\n Parameters\n ----------\n file_path : str\n The path of the file.\n \n Returns\n -------\n data : pd.DataFrame\n A csv file\n\n Examples\n --------\n read_data('data/split/train.csv')\n \"\"\"\n try:\n abs_path = os.path.abspath(file_path)\n except FileNotFoundError:\n raise (\"Absolute path to {input_file} not found in home directory\")\n else:\n data = pd.read_csv(abs_path)\n return data\n\ndef main(input_path, out_dir):\n\n filename, file_extension = os.path.splitext(input_path)\n\n # assertion tests\n assert file_extension == \".csv\", f\"Wrong extesnion type. Extension has to be {file_extension}\"\n\n data = read_data(input_path)\n\n column_list = ['ID','LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_0', 'PAY_2',\n 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',\n 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',\n 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6',\n 'default payment next month']\n\n assert list(data.columns) == column_list, f\"Wrong Data Frame : Features should be {column_list}\"\n data= data.drop(columns= ['ID'])\n data = data.rename(columns={'default payment next month':'DEFAULT_PAYMENT_NEXT_MONTH'})\n train_data, test_data = train_test_split(data, test_size=0.2, random_state=123)\n\n save_file(out_dir, \"cleaned_train.csv\", train_data)\n save_file(out_dir, \"cleaned_test.csv\", test_data)\n save_file(out_dir, \"cleaned_data.csv\", data)\n\nif __name__ == \"__main__\":\n main(opt[\"--input_path\"], opt[\"--out_dir\"])"
] |
[
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split"
]
] |
racoles/PyXRF |
[
"e53b6fdae2bb4ce95273d9db05d8092b4d8ebec5"
] |
[
"pyxrf/model/load_data_from_db.py"
] |
[
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport h5py\nimport numpy as np\nimport os\nimport json\nimport multiprocessing\nimport pandas as pd\nimport platform\nimport math\nimport time as ttime\nimport copy\nimport re\nfrom distutils.version import LooseVersion\n\nimport logging\nimport warnings\n\ntry:\n import databroker\nexcept ImportError:\n pass\n\nfrom ..core.utils import convert_time_to_nexus_string\nfrom .scan_metadata import ScanMetadataXRF\n\nimport pyxrf\n\npyxrf_version = pyxrf.__version__\n\nlogger = logging.getLogger(__name__)\nwarnings.filterwarnings(\"ignore\")\n\nsep_v = os.sep\n\ntry:\n beamline_name = None\n\n # Attempt to find the configuration file first\n config_path = \"/etc/pyxrf/pyxrf.json\"\n if os.path.isfile(config_path):\n try:\n with open(config_path, \"r\") as beamline_pyxrf:\n beamline_config_pyxrf = json.load(beamline_pyxrf)\n beamline_name = beamline_config_pyxrf[\"beamline_name\"]\n except Exception as ex:\n raise IOError(f\"Error while opening configuration file {config_path!r}\") from ex\n\n else:\n # Otherwise try to identify the beamline using host name\n hostname = platform.node()\n beamline_names = {\n \"xf03id\": \"HXN\",\n \"xf05id\": \"SRX\",\n \"xf08bm\": \"TES\",\n \"xf04bm\": \"XFM\",\n }\n\n for k, v in beamline_names.items():\n if hostname.startswith(k):\n beamline_name = v\n\n if beamline_name is None:\n raise Exception(\"Beamline is not identified\")\n\n if beamline_name == \"HXN\":\n from pyxrf.db_config.hxn_db_config import db\n elif beamline_name == \"SRX\":\n from pyxrf.db_config.srx_db_config import db\n elif beamline_name == \"XFM\":\n from pyxrf.db_config.xfm_db_config import db\n elif beamline_name == \"TES\":\n from pyxrf.db_config.tes_db_config import db\n else:\n db = None\n db_analysis = None\n print(f\"Beamline Database is not used in pyxrf: unknown beamline {beamline_name!r}\")\n\nexcept Exception as ex:\n db = None\n print(f\"Beamline Database is not used in pyxrf: {ex}\")\n\n\ndef flip_data(input_data, subscan_dims=None):\n \"\"\"\n Flip 2D or 3D array. The flip happens on the second index of shape.\n .. warning :: This function mutates the input values.\n\n Parameters\n ----------\n input_data : 2D or 3D array.\n\n Returns\n -------\n flipped data\n \"\"\"\n new_data = np.asarray(input_data)\n data_shape = input_data.shape\n if len(data_shape) == 2:\n if subscan_dims is None:\n new_data[1::2, :] = new_data[1::2, ::-1]\n else:\n i = 0\n for nx, ny in subscan_dims:\n start = i + 1\n end = i + ny\n new_data[start:end:2, :] = new_data[start:end:2, ::-1]\n i += ny\n\n if len(data_shape) == 3:\n if subscan_dims is None:\n new_data[1::2, :, :] = new_data[1::2, ::-1, :]\n else:\n i = 0\n for nx, ny in subscan_dims:\n start = i + 1\n end = i + ny\n new_data[start:end:2, :, :] = new_data[start:end:2, ::-1, :]\n i += ny\n return new_data\n\n\ndef fetch_run_info(run_id_uid):\n \"\"\"\n Fetches key data from start document of the selected run\n\n Parameters\n ----------\n run_id_uid: int or str\n Run ID (positive or negative int) or UID (str, full or short) of the run.\n\n Returns\n -------\n int or str\n Run ID (always positive int) or Run UID (str, always full UID). 
Returns\n `run_id=-1` and `run_uid=\"\"` in case of failure.\n\n Raises\n ------\n RuntimeError\n failed to fetch the run from Databroker\n \"\"\"\n try:\n hdr = db[run_id_uid]\n run_id = hdr.start[\"scan_id\"]\n run_uid = hdr.start[\"uid\"]\n except Exception:\n if isinstance(run_id_uid, int):\n msg = f\"ID {run_id_uid}\"\n else:\n msg = f\"UID '{run_id_uid}'\"\n raise RuntimeError(f\"Failed to find run with {msg}.\")\n return run_id, run_uid\n\n\ndef fetch_data_from_db(\n run_id_uid,\n fpath=None,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=False,\n save_scaler=True,\n num_end_lines_excluded=None,\n):\n \"\"\"\n Read data from databroker.\n This is the place where new beamlines can be easily added\n to pyxrf GUI.\n Save the data from databroker to hdf file if needed.\n\n .. note:: Requires the databroker package from NSLS2\n\n Parameters\n ----------\n runid : int\n id number for given run\n fpath: str, optional\n path to save hdf file\n create_each_det: bool, optional\n Do not create data for each detector is data size is too large,\n if set as false. This will slow down the speed of creating hdf file\n with large data size. srx beamline only.\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails.\n completed_scans_only : bool\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. If incomplete scan is\n encountered, an exception is thrown.\n False: the feature is disabled, incomplete scan will be processed.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. 
If the file already exists, then the exception\n will be raised (loading the single scan) or the scan will be skipped (loading the range\n of scans).\n output_to_file : bool, optional\n save data to hdf5 file if True\n save_scaler : bool, optional\n choose to save scaler data or not for srx beamline, test purpose only.\n num_end_lines_excluded : int, optional\n remove the last few bad lines\n\n Returns\n -------\n dict of data in 2D format matching x,y scanning positions\n \"\"\"\n hdr = db[-1]\n print(\"Loading data from database.\")\n\n if hdr.start.beamline_id == \"HXN\":\n data = map_data2D_hxn(\n run_id_uid,\n fpath,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=output_to_file,\n )\n elif hdr.start.beamline_id == \"xf05id\" or hdr.start.beamline_id == \"SRX\":\n data = map_data2D_srx(\n run_id_uid,\n fpath,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=output_to_file,\n save_scaler=save_scaler,\n num_end_lines_excluded=num_end_lines_excluded,\n )\n elif hdr.start.beamline_id == \"XFM\":\n data = map_data2D_xfm(\n run_id_uid,\n fpath,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=output_to_file,\n )\n elif hdr.start.beamline_id == \"TES\":\n data = map_data2D_tes(\n run_id_uid,\n fpath,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=output_to_file,\n )\n else:\n print(\"Databroker is not setup for this beamline\")\n return\n free_memory_from_handler()\n return data\n\n\ndef make_hdf(\n start,\n end=None,\n *,\n fname=None,\n wd=None,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n prefix=\"scan2D_\",\n create_each_det=False,\n save_scaler=True,\n num_end_lines_excluded=None,\n):\n \"\"\"\n Load data from database and save it in HDF5 files.\n\n Parameters\n ----------\n\n start : int\n Run ID (positive or negative int) or of the first scan to convert or Run UID\n (str, full or short). If `start` is UID, then `end` must not be provided or set to None.\n end : int, optional\n scan ID of the last scan to convert. If ``end`` is not specified or None, then\n only the scan with ID ``start`` is converted and an exception is raised if an\n error occurs during the conversion. If ``end`` is specified, then scans in the\n range ``scan``..``end`` are converted and a scan in the sequence is skipped\n if there is an issue during the conversion. For example:\n\n .. code-block:: python\n\n make_hdf(2342)\n\n will process scan #2342 and throw an exception if error occurs. On the other hand\n\n .. code-block:: python\n\n make_hdf(2342, 2342)\n\n will process scan #2342 and write data to file if conversion is successful, otherwise\n no file will be created. The scans with IDs in the range 2342..2441 can be processed by\n calling\n\n .. 
code-block:: python\n\n make_hdf(2342, 2441)\n\n Scans with IDs in specified range, but not existing in the database, or scans causing errors\n during conversion will be skipped.\n\n fname : string, optional keyword parameter\n path to save data file when ``end`` is ``None`` (only one scan is processed).\n File name is created automatically if ``fname`` is not specified.\n wd : str\n working directory, the file(s) will be created in this directory. The directory\n will be created if it does not exist. If ``wd`` is not specified, then the file(s)\n will be saved to the current directory.\n fname_add_version : bool, keyword parameter\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails. If ``end`` is ``None``, then\n the exception is raised. If ``end`` is specified, the scan is skipped\n and the next scan in the range is processed.\n completed_scans_only : bool, keyword parameter\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. If incomplete scan is\n encountered: an exception is thrown (``end`` is not specified) or the scan\n is skipped (``end`` is specified). This feature allows to use\n ``make_hdf`` as part of the script for real time data analysis:\n\n .. code-block:: python\n\n # Wait time between retires in seconds. Select the value appropriate\n # for the workflow type.\n wait_time = 600 # Wait for 10 minuts between retries.\n for scan_id in range(n_start, n_start + n_scans):\n while True:\n try:\n # Load scan if it is available\n make_hdf(scan_id, completed_scans_only=True)\n # Process the file using the prepared parameter file\n pyxrf_batch(scan_id, param_file_name=\"some_parameter_file.json\")\n break\n except Exception:\n time.sleep(wait_time)\n\n Such scripts are currently used at HXN and SRX beamlines of NSLS-II, so this feature\n supports the existing workflows.\n False: the feature is disabled, incomplete scan will be processed.\n successful_scans_only : bool, keyword parameter\n Similar to ``complete_scans_only``. The file is created only if the stop document\n exists and ``exit_status=='success'``.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. If the file already exists, then the exception\n will be raised (loading the single scan) or the scan will be skipped (loading the range\n of scans).\n prefix : str, optional\n prefix name of the created data file. If ``fname`` is not specified, it is generated\n automatically in the form ``<prefix>_<scanID>_<some_additional_data>.h5``\n create_each_det: bool, optional\n True: save data for each available detector channel into a file. Enabling this\n feature leads to larger data files. 
Inspection of data from individual channels\n of the detector may be helpful in evaluation of quality of the detector calibration\n and adds flexibility to data analysis. This feature may be disabled if large number\n of routine scans recorded by well tested system are processed and disk space\n is an issue.\n False: disable the feature. Only the sum of all detector channels is saved\n to disk.\n save_scaler : bool, optional\n True: save scaler data in the data file\n False: do not save scaler data\n num_end_lines_excluded : int, optional\n The number of lines at the end of the scan that will not be saved to the data file.\n \"\"\"\n\n if wd:\n # Create the directory\n wd = os.path.expanduser(wd)\n wd = os.path.abspath(wd) # 'make_dirs' does not accept paths that contain '..'\n os.makedirs(wd, exist_ok=True) # Does nothing if the directory already exists\n\n if isinstance(start, str) or (end is None):\n # Two cases: only one Run ID ('start') is provided or 'start' is Run UID.\n # In both cases only one run is loaded.\n if end is not None:\n raise ValueError(r\"Parameter 'end' must be None if run is loaded by UID\")\n\n run_id, run_uid = fetch_run_info(start) # This may raise RuntimeException\n\n # Load one scan with ID specified by ``start``\n # If there is a problem while reading the scan, the exception is raised.\n if fname is None:\n fname = prefix + str(run_id) + \".h5\"\n if wd:\n fname = os.path.join(wd, fname)\n fetch_data_from_db(\n run_uid,\n fpath=fname,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=True,\n save_scaler=save_scaler,\n num_end_lines_excluded=num_end_lines_excluded,\n )\n else:\n # Both ``start`` and ``end`` are specified. Convert the scans in the range\n # ``start`` .. ``end``. If there is a problem reading the scan,\n # then the scan is skipped and the next scan is processed\n datalist = range(start, end + 1)\n for v in datalist:\n fname = prefix + str(v) + \".h5\"\n if wd:\n fname = os.path.join(wd, fname)\n try:\n fetch_data_from_db(\n v,\n fpath=fname,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=True,\n save_scaler=save_scaler,\n num_end_lines_excluded=num_end_lines_excluded,\n )\n print(f\"Scan #{v}: Conversion completed.\\n\")\n except Exception as ex:\n print(f\"Scan #{v}: Can not complete the conversion\")\n print(f\" ({ex})\\n\")\n\n\ndef _is_scan_complete(hdr):\n \"\"\"Checks if the scan is complete ('stop' document exists)\n\n Parameters\n ----------\n\n hdr : databroker.core.Header\n header of the run\n hdr = db[scan_id]\n The header must be reloaded each time before the function is called.\n\n Returns\n -------\n\n True: scan is complete\n False: scan is incomplete (still running)\n \"\"\"\n\n # hdr.stop is an empty dictionary if the scan is incomplete\n return bool(hdr.stop)\n\n\ndef _is_scan_successful(hdr):\n \"\"\"\n Checks if the scan is successful\n \"\"\"\n return bool(hdr.stop) and hdr.stop[\"exit_status\"] == \"success\"\n\n\ndef _extract_metadata_from_header(hdr):\n \"\"\"\n Extract metadata from start and stop document. 
Metadata extracted from other document\n in the scan are beamline specific and added to dictionary at later time.\n \"\"\"\n start_document = hdr.start\n\n mdata = ScanMetadataXRF()\n\n data_locations = {\n \"scan_id\": [\"scan_id\"],\n \"scan_uid\": [\"uid\"],\n \"scan_instrument_id\": [\"beamline_id\"],\n \"scan_instrument_name\": [],\n \"scan_end_station\": [],\n \"scan_time_start\": [\"time\"],\n \"scan_time_start_utc\": [\"time\"],\n \"instrument_mono_incident_energy\": [\"beamline_status/energy\", \"scan/energy\"],\n \"instrument_beam_current\": [],\n \"instrument_detectors\": [\"detectors\", \"scan/detectors\"],\n \"sample_name\": [\"sample/name\", \"sample\", \"scan/sample_name\"],\n \"experiment_plan_name\": [\"plan_name\"],\n \"experiment_plan_type\": [\"plan_type\"],\n \"proposal_num\": [\"proposal/proposal_num\"],\n \"proposal_title\": [\"proposal/proposal_title\"],\n \"proposal_PI_lastname\": [\"proposal/PI_lastname\"],\n \"proposal_saf_num\": [\"proposal/saf_num\"],\n \"proposal_cycle\": [\"proposal/cycle\"],\n # Scan parameters\n \"param_type\": [\"scan/type\"],\n \"param_input\": [\"scan/scan_input\"],\n \"param_dwell\": [\"scan/dwell\", \"exposure_time\"],\n \"param_snake\": [\"scan/snake\"],\n \"param_shape\": [\"scan/shape\", \"shape\"],\n \"param_theta\": [\"scan/theta/val\"],\n \"param_theta_units\": [\"scan/theta/units\"],\n \"param_delta\": [\"scan/delta/val\"],\n \"param_delta_units\": [\"scan/delta/units\"],\n \"param_fast_axis\": [\"scaninfo/fast_axis\", \"scan/fast_axis/motor_name\"],\n \"param_fast_axis_units\": [\"scan/fast_axis/units\"],\n \"param_slow_axis\": [\"scaninfo/slow_axis\", \"scan/slow_axis/motor_name\"],\n \"param_slow_axis_units\": [\"scan/slow_axis/units\"],\n }\n\n for key, locations in data_locations.items():\n # Go to the next key if no location is defined for the current key.\n # No locations means that the data is not yet defined in start document on any beamline\n # Multiple locations point to locations at different beamlines\n if not locations:\n continue\n\n # For each metadata key there could be none, one or multiple locations in the start document\n for loc in locations:\n path = loc.split(\"/\") #\n ref = start_document\n for n, p in enumerate(path):\n if n >= len(path) - 1:\n break\n # 'ref' must always point to dictionary\n if not isinstance(ref, dict):\n ref = None\n break\n if p in ref:\n ref = ref[p]\n else:\n ref = None\n break\n # At this point 'ref' must be a dictionary\n value = None\n if ref is not None and isinstance(ref, dict):\n if path[-1] in ref:\n value = ref[path[-1]]\n # Now we finally arrived to the end of the path: the 'value' must be a scalar or a list\n if value is not None and not isinstance(value, dict):\n if path[-1] == \"time\":\n if key.endswith(\"_utc\"):\n value = convert_time_to_nexus_string(ttime.gmtime(value))\n else:\n value = convert_time_to_nexus_string(ttime.localtime(value))\n mdata[key] = value\n break\n\n stop_document = hdr.stop\n\n if stop_document:\n\n if \"time\" in stop_document:\n t = stop_document[\"time\"]\n mdata[\"scan_time_stop\"] = convert_time_to_nexus_string(ttime.localtime(t))\n mdata[\"scan_time_stop_utc\"] = convert_time_to_nexus_string(ttime.gmtime(t))\n\n if \"exit_status\" in stop_document:\n mdata[\"scan_exit_status\"] = stop_document[\"exit_status\"]\n\n else:\n\n mdata[\"scan_exit_status\"] = \"incomplete\"\n\n # Add full beamline name (if available, otherwise don't create the entry).\n # Also, don't overwrite the existing name if it was read from the start document\n if 
\"scan_instrument_id\" in mdata and \"scan_instrument_name\" not in mdata:\n instruments = {\n \"srx\": \"Submicron Resolution X-ray Spectroscopy\",\n \"hxn\": \"Hard X-ray Nanoprobe\",\n \"tes\": \"Tender Energy X-ray Absorption Spectroscopy\",\n \"xfm\": \"X-ray Fluorescence Microprobe\",\n }\n iname = instruments.get(mdata[\"scan_instrument_id\"].lower(), \"\")\n if iname:\n mdata[\"scan_instrument_name\"] = iname\n\n return mdata\n\n\ndef _get_metadata_value_from_descriptor_document(hdr, *, data_key, stream_name=\"baseline\"):\n \"\"\"\n Returns the first occurrence of the variable with the name ``data_key`` in\n specified document stream. Returns ``None`` if the variable is not found\n \"\"\"\n value = None\n docs = hdr.documents(stream_name=stream_name)\n for name, doc in docs:\n if (name != \"event\") or (\"descriptor\" not in doc):\n continue\n try:\n value = doc[\"data\"][data_key]\n break # Don't go through the rest of the documents\n except Exception:\n pass\n\n return value\n\n\ndef _get_metadata_all_from_descriptor_document(hdr, *, data_key, stream_name=\"baseline\"):\n \"\"\"\n Returns the list of the recorded values of variables with the name ``data_key`` in\n specified document stream. Returns ``None`` if the variable is not found\n \"\"\"\n value = []\n docs = hdr.documents(stream_name=stream_name)\n for name, doc in docs:\n if (name != \"event\") or (\"descriptor\" not in doc):\n continue\n try:\n value.append(doc[\"data\"][data_key])\n except Exception:\n pass\n\n value = value or None # Replace [] with None\n\n return value\n\n\ndef map_data2D_hxn(\n run_id_uid,\n fpath,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=True,\n):\n \"\"\"\n Save the data from databroker to hdf file.\n\n .. note:: Requires the databroker package from NSLS2\n\n Parameters\n ----------\n run_id_uid : int\n ID or UID of a run\n fpath: str\n path to save hdf file\n create_each_det: bool, optional\n Do not create data for each detector is data size is too large,\n if set as false. This will slow down the speed of creating hdf file\n with large data size.\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails.\n completed_scans_only : bool\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. If incomplete scan is\n encountered: an exception is thrown.\n False: the feature is disabled, incomplete scan will be processed.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. 
If the file already exists, then the exception\n is raised.\n output_to_file : bool, optional\n save data to hdf5 file if True\n \"\"\"\n hdr = db[run_id_uid]\n runid = hdr.start[\"scan_id\"] # Replace with the true value (runid may be relative, such as -2)\n\n logger.info(f\"Loading scan #{runid}\")\n if completed_scans_only and not _is_scan_complete(hdr):\n raise Exception(\"Scan is incomplete. Only completed scans are currently processed.\")\n if successful_scans_only and not _is_scan_successful(hdr):\n raise Exception(\n \"Scan is not successfully completed. Only successfully completed scans are currently processed.\"\n )\n\n # Generate the default file name for the scan\n if fpath is None:\n fpath = f\"scan2D_{runid}.h5\"\n\n # Output data is the list of data structures for all available detectors\n data_output = []\n\n start_doc = hdr[\"start\"]\n\n # Exclude certain types of plans based on data from the start document\n if start_doc[\"plan_type\"] in (\"FlyPlan1D\",):\n raise RuntimeError(f\"Failed to load the plan: plan {start_doc['plan_type']!r} is not supported\")\n\n # The dictionary holding scan metadata\n mdata = _extract_metadata_from_header(hdr)\n # Some metadata is located at specific places in the descriptor documents\n # Search through the descriptor documents for the metadata\n v = _get_metadata_value_from_descriptor_document(\n hdr, data_key=\"beamline_status_beam_current\", stream_name=\"baseline\"\n )\n if v is not None:\n mdata[\"instrument_beam_current\"] = v\n\n v = _get_metadata_value_from_descriptor_document(hdr, data_key=\"energy\", stream_name=\"baseline\")\n if v is not None:\n mdata[\"instrument_mono_incident_energy\"] = v\n\n # --------------------------------------------------------------------------------------------\n # IDENTIFY END STATION AND SELECT THE APPROPRIATE THETA ANGLE FROM BASELINE\n # Identify endstation\n end_station = \"\"\n es_motors = hdr.start[\"motors\"]\n motors_mll, motors_zp = (\"dssx\", \"dssy\", \"dssz\"), (\"zpssx\", \"zpssy\", \"zpssz\")\n if es_motors[0] in motors_mll:\n end_station = \"MLL\"\n elif es_motors[0] in motors_zp:\n end_station = \"ZP\"\n else:\n logger.warning(\"Failed to identify end station from data found in start document.\")\n if end_station:\n mdata[\"scan_end_station\"] = end_station\n\n logger.info(f\"Identified beamline end station: {end_station!r}\")\n\n # Get theta angles (each scan has the angles for both endstations, but we need to pick one)\n v = _get_metadata_value_from_descriptor_document(\n hdr, data_key=\"beamline_status_beam_current\", stream_name=\"baseline\"\n )\n if end_station == \"MLL\":\n theta = _get_metadata_value_from_descriptor_document(hdr, data_key=\"dsth\", stream_name=\"baseline\") # MLL\n elif end_station == \"ZP\":\n theta = _get_metadata_value_from_descriptor_document(hdr, data_key=\"zpsth\", stream_name=\"baseline\") # ZP\n else:\n theta = None\n # Add theta to the the metadata\n if theta is not None:\n mdata[\"param_theta\"] = round(theta * 1000) # Convert to mdeg (same as SRX)\n mdata[\"param_theta_units\"] = \"mdeg\"\n theta = round(theta, 3) # Better presentation\n else:\n logger.warning(\"Angle 'theta' is not found and is not included in the HDF file metadata\")\n # -----------------------------------------------------------------------------------------------\n # Determine fast axis and slow axis\n fast_axis, slow_axis = start_doc.get(\"fast_axis\", None), None\n motors = start_doc.get(\"motors\", None)\n if motors and isinstance(motors, (list, tuple)) and len(motors) == 
2:\n fast_axis = fast_axis if fast_axis else motors[0]\n fast_axis_index = motors.index(fast_axis, 0)\n slow_axis_index = 0 if (fast_axis_index == 1) else 1\n slow_axis = motors[slow_axis_index]\n if fast_axis:\n mdata[\"param_fast_axis\"] = fast_axis\n if slow_axis:\n mdata[\"param_slow_axis\"] = slow_axis\n # -----------------------------------------------------------------------------------------------\n # Reconstruct scan input\n try:\n plan_args = start_doc[\"plan_args\"]\n # px_motor = plan_args[\"motor1\"]\n px_start, px_end, px_step = plan_args[\"scan_start1\"], plan_args[\"scan_end1\"], plan_args[\"num1\"]\n # py_motor = plan_args[\"motor2\"]\n py_start, py_end, py_step = plan_args[\"scan_start2\"], plan_args[\"scan_end2\"], plan_args[\"num2\"]\n dwell_time = plan_args[\"exposure_time\"]\n param_input = [px_start, px_end, px_step, py_start, py_end, py_step, dwell_time]\n mdata[\"param_input\"] = param_input\n except Exception as ex:\n logger.warning(\n \"Failed to reconstruct scan input: %s. Scan input is not saved as part of metadata to HDF5 file\",\n str(ex),\n )\n # -------------------------------------------------------------------------------------------------\n\n if \"dimensions\" in start_doc:\n datashape = start_doc.dimensions\n elif \"shape\" in start_doc:\n datashape = start_doc.shape\n else:\n logger.error(\"No dimension/shape is defined in hdr.start.\")\n\n datashape = [datashape[1], datashape[0]] # vertical first, then horizontal\n fly_type = start_doc.get(\"fly_type\", None)\n subscan_dims = start_doc.get(\"subscan_dims\", None)\n\n if \"motors\" in hdr.start:\n pos_list = hdr.start.motors\n elif \"axes\" in hdr.start:\n pos_list = hdr.start.axes\n else:\n pos_list = [\"zpssx[um]\", \"zpssy[um]\"]\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = \"hxn_pv_config.json\"\n config_path = sep_v.join(current_dir.split(sep_v)[:-2] + [\"configs\", config_file])\n with open(config_path, \"r\") as json_data:\n config_data = json.load(json_data)\n\n keylist = hdr.descriptors[0].data_keys.keys()\n det_list = [v for v in keylist if \"xspress3\" in v] # find xspress3 det with key word matching\n\n scaler_list_all = config_data[\"scaler_list\"]\n\n all_keys = hdr.descriptors[0].data_keys.keys()\n scaler_list = [v for v in scaler_list_all if v in all_keys]\n\n # fields = det_list + scaler_list + pos_list\n data = db.get_table(hdr, fill=True)\n\n data_out = map_data2D(\n data,\n datashape,\n det_list=det_list,\n pos_list=pos_list,\n scaler_list=scaler_list,\n create_each_det=create_each_det,\n fly_type=fly_type,\n subscan_dims=subscan_dims,\n spectrum_len=4096,\n )\n\n # Transform coordinates for the fast axis if necessary:\n # Flip the direction of the fast axis for certain angles\n if (theta is not None) and fast_axis.lower().endswith(\"z\") and (theta < 0):\n logger.info(f\"Fast axis: {fast_axis!r}. Angle 'theta': {theta}. Flipping data along the fast axis ...\")\n data_out[\"pos_data\"][fast_axis_index, :, :] = np.fliplr(data_out[\"pos_data\"][fast_axis_index, :, :])\n data_out[\"scaler_data\"] = np.flip(data_out[\"scaler_data\"], axis=1)\n data_out[\"det_sum\"] = np.flip(data_out[\"det_sum\"], axis=1)\n for k in data.keys():\n if re.search(r\"^det[\\d]+$\", k): # Individual detectors such as 'det1', 'det2', etc.\n data_out[k] = np.flip(data_out[k], axis=1)\n else:\n logger.info(\n f\"Fast axis: {fast_axis!r}. Angle 'theta': {theta}. 
Data along the fast axis is not reordered.\"\n )\n\n # Correct positions for distortions due to rotation of the stage\n if theta is not None:\n if fast_axis.lower().endswith(\"x\"):\n logger.info(f\"Scaling the positions along fast X-axis ({fast_axis!r}: 'theta'={theta}) ...\")\n data_out[\"pos_data\"][fast_axis_index, :, :] *= np.cos(theta * np.pi / 180.0)\n elif fast_axis.lower().endswith(\"z\"):\n logger.info(f\"Scaling the positions along fast Z-axis ({fast_axis!r}: 'theta'={theta}) ...\")\n data_out[\"pos_data\"][fast_axis_index, :, :] *= np.sin(theta * np.pi / 180.0)\n else:\n logger.info(f\"No scaling is applied to the positions along fast axis ({fast_axis!r})\")\n\n if output_to_file:\n # output to file\n print(\"Saving data to hdf file.\")\n fpath = save_data_to_hdf5(\n fpath,\n data_out,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n\n detector_name = \"xpress3\"\n d_dict = {\"dataset\": data_out, \"file_name\": fpath, \"detector_name\": detector_name, \"metadata\": mdata}\n data_output.append(d_dict)\n\n return data_output\n\n # write_db_to_hdf(fpath, data, datashape,\n # det_list=det_list, pos_list=pos_list,\n # scaler_list=scaler_list,\n # fly_type=fly_type, subscan_dims=subscan_dims)\n #\n # # use suitcase to save baseline data, and scaler data from primary\n # tmp = set()\n # for descriptor in hdr.descriptors:\n # # no 3D vector data\n # xs3 = [key for key in descriptor.data_keys.keys() if 'xspress3' in key]\n # tmp.update(xs3)\n # tmp.add('merlin1')\n # fds = sc.filter_fields(hdr, tmp)\n # if full_data == True:\n # sc.export(hdr, fpath, db.mds, fields=fds, use_uid=False)\n\n\ndef get_total_scan_point(hdr):\n \"\"\"\n Find the how many data points are recorded. This number may not equal to the total number\n defined at the start of the scan due to scan stop or abort.\n \"\"\"\n evs = hdr.events()\n n = 0\n try:\n for e in evs:\n n = n + 1\n except IndexError:\n pass\n return n\n\n\ndef map_data2D_srx(\n run_id_uid,\n fpath,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=True,\n save_scaler=True,\n num_end_lines_excluded=None,\n):\n \"\"\"\n Transfer the data from databroker into a correct format following the\n shape of 2D scan.\n This function is used at SRX beamline for both fly scan and step scan.\n Save to hdf file if needed.\n\n .. note:: Requires the databroker package from NSLS2\n\n Parameters\n ----------\n run_id_uid : int\n ID or UID of a run\n fpath: str\n path to save hdf file\n create_each_det: bool, optional\n Do not create data for each detector is data size is too large,\n if set as false. This will slow down the speed of creating hdf file\n with large data size.\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails.\n completed_scans_only : bool\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. 
If incomplete scan is\n encountered: an exception is thrown.\n False: the feature is disabled, incomplete scan will be processed.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. If the file already exists, then the exception\n is raised.\n output_to_file : bool, optional\n save data to hdf5 file if True\n save_scaler : bool, optional\n choose to save scaler data or not for srx beamline, test purpose only.\n num_end_lines_excluded : int, optional\n remove the last few bad lines\n\n Returns\n -------\n dict of data in 2D format matching x,y scanning positions\n \"\"\"\n hdr = db[run_id_uid]\n start_doc = hdr[\"start\"]\n use_new_format = \"md_version\" in start_doc\n\n if use_new_format:\n return map_data2D_srx_new(\n run_id_uid=run_id_uid,\n fpath=fpath,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=output_to_file,\n save_scaler=save_scaler,\n num_end_lines_excluded=num_end_lines_excluded,\n )\n else:\n return map_data2D_srx_old(\n run_id_uid=run_id_uid,\n fpath=fpath,\n create_each_det=create_each_det,\n fname_add_version=fname_add_version,\n completed_scans_only=completed_scans_only,\n successful_scans_only=successful_scans_only,\n file_overwrite_existing=file_overwrite_existing,\n output_to_file=output_to_file,\n save_scaler=save_scaler,\n num_end_lines_excluded=num_end_lines_excluded,\n )\n\n\ndef map_data2D_srx_old(\n run_id_uid,\n fpath,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=True,\n save_scaler=True,\n num_end_lines_excluded=None,\n):\n \"\"\"\n Transfer the data from databroker into a correct format following the\n shape of 2D scan.\n This function is used at SRX beamline for both fly scan and step scan.\n Save to hdf file if needed.\n\n .. note:: Requires the databroker package from NSLS2\n\n Parameters\n ----------\n run_id_uid : int\n ID or UID of a run\n fpath: str\n path to save hdf file\n create_each_det: bool, optional\n Do not create data for each detector is data size is too large,\n if set as false. This will slow down the speed of creating hdf file\n with large data size.\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails.\n completed_scans_only : bool\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. 
If incomplete scan is\n encountered: an exception is thrown.\n False: the feature is disabled, incomplete scan will be processed.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. If the file already exists, then the exception\n is raised.\n output_to_file : bool, optional\n save data to hdf5 file if True\n save_scaler : bool, optional\n choose to save scaler data or not for srx beamline, test purpose only.\n num_end_lines_excluded : int, optional\n remove the last few bad lines\n\n Returns\n -------\n dict of data in 2D format matching x,y scanning positions\n \"\"\"\n hdr = db[run_id_uid]\n runid = hdr.start[\"scan_id\"] # Replace with the true value (runid may be relative, such as -2)\n\n print(\"Scan metadata format: old SRX specification.\")\n\n if completed_scans_only and not _is_scan_complete(hdr):\n raise Exception(\"Scan is incomplete. Only completed scans are currently processed.\")\n if successful_scans_only and not _is_scan_successful(hdr):\n raise Exception(\n \"Scan is not successfully completed. Only successfully completed scans are currently processed.\"\n )\n\n spectrum_len = 4096\n start_doc = hdr[\"start\"]\n\n # The dictionary holding scan metadata\n mdata = _extract_metadata_from_header(hdr)\n plan_n = start_doc.get(\"plan_name\")\n\n # Load configuration file\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = \"srx_pv_config.json\"\n config_path = sep_v.join(current_dir.split(sep_v)[:-2] + [\"configs\", config_file])\n with open(config_path, \"r\") as json_data:\n config_data = json.load(json_data)\n\n # Generate the default file name for the scan\n if fpath is None:\n fpath = f\"scan2D_{runid}.h5\"\n\n # Output data is the list of data structures for all available detectors\n data_output = []\n\n # There may be no 'plan_name' key in the old stepscans\n if (plan_n is None) or (\"fly\" not in plan_n): # not fly scan\n\n print()\n print(\"****************************************\")\n print(\" Loading SRX step scan \")\n print(\"****************************************\")\n\n # Examples for testing on SRX beamline:\n # good 'old-style' step scan ID: 2357 UID: e063146b-103a-40c5-9266-2201f157e950\n # good 'new-style' step scan ID: 18015 UID: 6ae30aa1-5834-4641-8e68-5eaad4669ce0\n\n fly_type = None\n\n if num_end_lines_excluded is None:\n # It seems like the 'shape' in plan is in the form of [y, x], where\n # y - is the vertical and x is horizontal axis. This matches the\n # shape of the matrix that is used for storage of the maps.\n # In step scan, the results are represented as 1D array, not 2D array,\n # so it needs to be reshaped before processing. So the datashape\n # needs to be determined correctly.\n # We also assume that scanning is performed along the x-axis first\n # before stepping along y-axis. Snaking may be on or off.\n # Different order (along y-axis first, then along x-axis) will require\n # some additional parameter in the start document to indicate this.\n # And the 'datashape' will need to be set the opposite way. 
Also\n # the map representation will be transposed.\n datashape = [start_doc[\"shape\"][0], start_doc[\"shape\"][1]]\n else:\n datashape = [start_doc[\"shape\"][0] - num_end_lines_excluded, start_doc[\"shape\"][1]]\n\n snake_scan = start_doc.get(\"snaking\")\n if snake_scan[1] is True:\n fly_type = \"pyramid\"\n\n if hdr.start.get(\"plan_type\") == \"OuterProductAbsScanPlan\":\n # This is 'old-style' step scan\n detector_list = [\"xs_settings_ch1\", \"xs_settings_ch2\", \"xs_settings_ch3\"]\n scaler_list = [\"current_preamp_ch2\"]\n else:\n # This is 'new-style' step scan\n detector_list = config_data[\"xrf_detector\"]\n scaler_list = config_data[\"scaler_list\"]\n\n try:\n data = hdr.table(fill=True, convert_times=False)\n\n except IndexError:\n total_len = get_total_scan_point(hdr) - 2\n evs, _ = zip(*zip(hdr.events(fill=True), range(total_len)))\n namelist = detector_list + hdr.start.motors + scaler_list\n dictv = {v: [] for v in namelist}\n for e in evs:\n for k, v in dictv.items():\n dictv[k].append(e.data[k])\n data = pd.DataFrame(dictv, index=np.arange(1, total_len + 1)) # need to start with 1\n\n # Commented by DG: Just use the detector names from .json configuration file. Do not delete the code.\n # express3 detector name changes in databroker\n # if xrf_detector_names[0] not in data.keys():\n # xrf_detector_names = ['xs_channel'+str(i) for i in range(1,4)]\n # config_data['xrf_detector'] = xrf_detector_names\n\n if output_to_file:\n if \"xs\" in hdr.start.detectors:\n logger.info(\"Saving data to hdf file: Xpress3 detector #1 (three channels).\")\n root, ext = os.path.splitext(fpath)\n fpath_out = f\"{root + '_xs'}{ext}\"\n data_out = assemble_data_SRX_stepscan(\n data,\n datashape,\n det_list=detector_list,\n pos_list=hdr.start.motors,\n scaler_list=scaler_list,\n fname_add_version=fname_add_version,\n create_each_det=create_each_det,\n fly_type=fly_type,\n base_val=config_data[\"base_value\"],\n ) # base value shift for ic\n fpath_out = save_data_to_hdf5(\n fpath_out,\n data_out,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n d_dict = {\"dataset\": data_out, \"file_name\": fpath_out, \"detector_name\": \"xs\", \"metadata\": mdata}\n data_output.append(d_dict)\n\n if \"xs2\" in hdr.start.detectors:\n logger.info(\"Saving data to hdf file: Xpress3 detector #2 (single channel).\")\n root, ext = os.path.splitext(fpath)\n fpath_out = f\"{root}_xs2{ext}\"\n data_out = assemble_data_SRX_stepscan(\n data,\n datashape,\n # The following must be XS2 detectors (not present in 'old' step scans)\n det_list=config_data[\"xrf_detector2\"],\n pos_list=hdr.start.motors,\n scaler_list=scaler_list,\n fname_add_version=fname_add_version,\n create_each_det=create_each_det,\n fly_type=fly_type,\n base_val=config_data[\"base_value\"],\n ) # base value shift for ic\n fpath_out = save_data_to_hdf5(\n fpath_out,\n data_out,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n d_dict = {\"dataset\": data_out, \"file_name\": fpath_out, \"detector_name\": \"xs\", \"metadata\": mdata}\n data_output.append(d_dict)\n\n fln_list = [_[\"file_name\"] for _ in data_output]\n logger.debug(f\"Step scan data was saved to the following files: {fln_list}\")\n\n return data_output\n\n else:\n\n print()\n print(\"****************************************\")\n print(\" Loading SRX fly scan \")\n 
print(\"****************************************\")\n\n if save_scaler is True:\n scaler_list = [\"i0\", \"time\", \"i0_time\", \"time_diff\"]\n xpos_name = \"enc1\"\n ypos_name = \"hf_stage_y\" # 'hf_stage_x' if fast axis is vertical\n\n # The dictionary of fields that are used to store data from different detectors (for fly scan only)\n # key - the name of the field used to store data read from the detector\n # value - the detector name (probably short abbreviation, attached to the created file name so that\n # the detector could be identified)\n # A separate data file is created for each detector\n detector_field_dict = config_data[\"xrf_flyscan_detector_fields\"]\n\n num_det = 0 # Some default value (should never be used)\n\n # Added by AMK to allow flying of single element on xs2\n # if 'E_tomo' in start_doc['scaninfo']['type']:\n # num_det = 1\n # ypos_name = 'e_tomo_y'\n # else:\n # num_det = 3\n vertical_fast = False # assuming fast on x as default\n if num_end_lines_excluded is None:\n # vertical first then horizontal, assuming fast scan on x\n datashape = [start_doc[\"shape\"][1], start_doc[\"shape\"][0]]\n else:\n datashape = [start_doc[\"shape\"][1] - num_end_lines_excluded, start_doc[\"shape\"][0]]\n\n using_nanostage = \"nanoZebra\" in hdr.start.detectors\n\n if using_nanostage:\n # There should also be a source of 'z' positions\n xpos_name, ypos_name = \"enc1\", \"enc2\"\n # Note: the following block doesn't make sence for the setup with nanostage\n # The following condition will be more complicated when 'slow_axis' is\n # added to the metadata.\n # if hdr.start.scaninfo['fast_axis'] == \"NANOVER\":\n # xpos_name, ypos_name = ypos_name, xpos_name\n # vertical_fast = True\n else:\n if \"fast_axis\" in hdr.start.scaninfo:\n # fast scan along vertical, y is fast scan, x is slow\n if hdr.start.scaninfo[\"fast_axis\"] in (\"VER\", \"DET2VER\"):\n xpos_name = \"enc1\"\n ypos_name = \"hf_stage_x\"\n if \"E_tomo\" in start_doc[\"scaninfo\"][\"type\"]:\n ypos_name = \"e_tomo_x\"\n vertical_fast = True\n # fast vertical scan put shape[0] as vertical direction\n # datashape = [start_doc['shape'][0], start_doc['shape'][1]]\n\n new_shape = datashape + [spectrum_len]\n # total_points = datashape[0]*datashape[1]\n\n des = [d for d in hdr.descriptors if d.name == \"stream0\"][0]\n # merlin data doesn't need to be saved.\n # un_used_det = ['merlin', 'im'] # data not to be transfered for pyxrf\n # data_list_used = [v for v in des.data_keys.keys() if 'merlin' not in v.lower()]\n\n # The number of found detectors for which data exists in the database\n n_detectors_found = 0\n\n # Try each data field listed in the config file\n for detector_field, detector_name in detector_field_dict.items():\n\n # Assume that Databroker caches the tables locally, so that data will not be reloaded\n e = hdr.events(fill=True, stream_name=des.name)\n\n new_data = {}\n data = {}\n\n if save_scaler is True:\n new_data[\"scaler_names\"] = scaler_list\n scaler_tmp = np.zeros([datashape[0], datashape[1], len(scaler_list)])\n if vertical_fast is True: # data shape only has impact on scaler data\n scaler_tmp = np.zeros([datashape[1], datashape[0], len(scaler_list)])\n key_list = scaler_list + [xpos_name]\n if using_nanostage:\n key_list += [ypos_name]\n for v in key_list:\n data[v] = np.zeros([datashape[0], datashape[1]])\n\n # Total number of lines in fly scan\n n_scan_lines_total = new_shape[0]\n\n detector_field_exists = True\n\n # This 'try' block was added in response to the request to retrieve data after\n # 
detector failure (empty files were saved by Xpress3). The program is supposed\n # to retrieve 'good' data from the scan.\n try:\n for m, v in enumerate(e):\n if m == 0:\n\n # Check if detector field does not exist. If not, then the file should not be created.\n if detector_field not in v.data:\n detector_field_exists = False\n break\n\n print()\n print(f\"Collecting data from detector '{detector_name}' (field '{detector_field}')\")\n\n # Determine the number of channels from the size of the table with fluorescence data\n num_det = v.data[detector_field].shape[1]\n\n # Now allocate space for fluorescence data\n if create_each_det is False:\n new_data[\"det_sum\"] = np.zeros(new_shape, dtype=np.float32)\n else:\n for i in range(num_det):\n new_data[f\"det{i + 1}\"] = np.zeros(new_shape, dtype=np.float32)\n\n print(f\"Number of the detector channels: {num_det}\")\n\n if m < datashape[0]: # scan is not finished\n if save_scaler is True:\n for n in scaler_list[:-1] + [xpos_name]:\n min_len = min(v.data[n].size, datashape[1])\n data[n][m, :min_len] = v.data[n][:min_len]\n # position data or i0 has shorter length than fluor data\n if min_len < datashape[1]:\n len_diff = datashape[1] - min_len\n # interpolation on scaler data\n interp_list = (v.data[n][-1] - v.data[n][-3]) / 2 * np.arange(\n 1, len_diff + 1\n ) + v.data[n][-1]\n data[n][m, min_len : datashape[1]] = interp_list\n fluor_len = v.data[detector_field].shape[0]\n if m > 0 and not (m % 10):\n print(f\"Processed {m} of {n_scan_lines_total} lines ...\")\n # print(f\"m = {m} Data shape {v.data['fluor'].shape} - {v.data['fluor'].shape[1] }\")\n # print(f\"Data keys: {v.data.keys()}\")\n if create_each_det is False:\n for i in range(num_det):\n # in case the data length in each line is different\n new_data[\"det_sum\"][m, :fluor_len, :] += v.data[detector_field][:, i, :]\n else:\n for i in range(num_det):\n # in case the data length in each line is different\n new_data[\"det\" + str(i + 1)][m, :fluor_len, :] = v.data[detector_field][:, i, :]\n\n except Exception as ex:\n logger.error(f\"Error occurred while reading data: {ex}. 
Trying to retrieve available data ...\")\n\n # If the detector field does not exist, then try the next one from the list\n if not detector_field_exists:\n continue\n\n # Modify file name (path) to include data on how many channels are included in the file and how many\n # channels are used for sum calculation\n root, ext = os.path.splitext(fpath)\n s = f\"_{detector_name}_sum{num_det}ch\"\n if create_each_det:\n s += f\"+{num_det}ch\"\n fpath_out = f\"{root}{s}{ext}\"\n\n if vertical_fast is True: # need to transpose the data, as we scan y first\n if create_each_det is False:\n new_data[\"det_sum\"] = np.transpose(new_data[\"det_sum\"], axes=(1, 0, 2))\n else:\n for i in range(num_det):\n new_data[\"det\" + str(i + 1)] = np.transpose(new_data[\"det\" + str(i + 1)], axes=(1, 0, 2))\n\n if save_scaler is True:\n if vertical_fast is False:\n for i, v in enumerate(scaler_list[:-1]):\n scaler_tmp[:, :, i] = data[v]\n scaler_tmp[:, :-1, -1] = np.diff(data[\"time\"], axis=1)\n scaler_tmp[:, -1, -1] = data[\"time\"][:, -1] - data[\"time\"][:, -2]\n else:\n for i, v in enumerate(scaler_list[:-1]):\n scaler_tmp[:, :, i] = data[v].T\n data_t = data[\"time\"].T\n scaler_tmp[:-1, :, -1] = np.diff(data_t, axis=0)\n scaler_tmp[-1, :, -1] = data_t[-1, :] - data_t[-2, :]\n new_data[\"scaler_data\"] = scaler_tmp\n x_pos = np.vstack(data[xpos_name])\n\n if using_nanostage:\n y_pos0 = np.vstack(data[ypos_name])\n else:\n # get y position data, from differet stream name primary\n data1 = hdr.table(fill=True, stream_name=\"primary\")\n if num_end_lines_excluded is not None:\n data1 = data1[: datashape[0]]\n # if ypos_name not in data1.keys() and 'E_tomo' not in start_doc['scaninfo']['type']:\n # print(f\"data1 keys: {data1.keys()}\")\n if ypos_name not in data1.keys():\n ypos_name = \"hf_stage_z\" # vertical along z\n y_pos0 = np.hstack(data1[ypos_name])\n\n # Original comment (from the previous authors):\n # y position is more than actual x pos, scan not finished?\n #\n # The following (temporary) fix assumes that incomplete scan contains\n # at least two completed lines. The step between the scanned lines\n # may be used to recreate y-coordinates for the lines that were not\n # scanned: data for those lines will be filled with zeros.\n # Having some reasonable y-coordinate data for the missed lines\n # will allow to plot and process the data even if the scan is incomplete.\n # In the case if scan contain only one line, there is no reliable way\n # to to generate coordinates, use the same step as for x coordinates\n # or 1 if the first scannned line contains only one point.\n\n # First check if the scan of the last line was completed. 
If not,\n # then x-coordinates of the scan points are all ZERO\n last_scan_line_no_data = False\n if math.isclose(np.sum(x_pos[x_pos.shape[0] - 1, :]), 0.0, abs_tol=1e-20):\n last_scan_line_no_data = True\n\n no_position_data = False\n if len(y_pos0) == 0 or (len(y_pos0) == 1 and last_scan_line_no_data):\n no_position_data = True\n print(\"WARNING: The scan contains no completed scan lines\")\n\n if len(y_pos0) < x_pos.shape[0] and len(y_pos0) > 1:\n # The number of the lines for which the scan was initiated\n # Unfortunately this is not the number of scanned lines,\n # so x-axis values need to be restored for the line #'n_scanned_lines - 1' !!!\n n_scanned_lines = len(y_pos0)\n print(f\"WARNING: The scan is not completed: {n_scanned_lines} out of {x_pos.shape[0]} lines\")\n y_step = 1\n if n_scanned_lines > 1:\n y_step = (y_pos0[-1] - y_pos0[0]) / (n_scanned_lines - 1)\n elif x_pos.shape[1] > 1:\n # Set 'y_step' equal to the absolute value of 'x_step'\n # this is just to select some reasonable scale and happens if\n # only one line was completed in the unfinished flyscan.\n # This is questionable decision, but it should be rarely applied\n y_step = math.fabs((x_pos[0, -1] - x_pos[0, 0]) / (x_pos.shape[1] - 1))\n # Now use 'y_step' to generate the remaining points\n n_pts = x_pos.shape[0] - n_scanned_lines\n v_start = y_pos0[-1] + y_step\n v_stop = v_start + (n_pts - 1) * y_step\n y_pos_filled = np.linspace(v_start, v_stop, n_pts)\n y_pos0 = np.append(y_pos0, y_pos_filled)\n # Now duplicate x-coordinate values from the last scanned line to\n # all the unscanned lines, otherwise they all will be zeros\n for n in range(n_scanned_lines - 1, x_pos.shape[0]):\n x_pos[n, :] = x_pos[n_scanned_lines - 2, :]\n\n elif x_pos.shape[0] > 1 and last_scan_line_no_data:\n # One possible scenario is that if the scan was interrupted while scanning\n # the last line. In this case the condition\n # len(y_pos0) >= x_pos.shape[0]\n # will hold, but the last line of x-coordinates will be filleds with\n # zeros, which will create a mess if data is plotted with PyXRF\n # To fix the problem, fill the last line with values from the previous line\n x_pos[-1, :] = x_pos[-2, :]\n\n # The following condition check is left from the existing code. 
It is still checking\n # for the case if 0 lines were scanned.\n if len(y_pos0) >= x_pos.shape[0] and not no_position_data:\n if using_nanostage:\n yv = y_pos0\n else:\n y_pos = y_pos0[: x_pos.shape[0]]\n x_tmp = np.ones(x_pos.shape[1])\n xv, yv = np.meshgrid(x_tmp, y_pos)\n # need to change shape to sth like [2, 100, 100]\n data_tmp = np.zeros([2, x_pos.shape[0], x_pos.shape[1]])\n data_tmp[0, :, :] = x_pos\n data_tmp[1, :, :] = yv\n new_data[\"pos_data\"] = data_tmp\n new_data[\"pos_names\"] = [\"x_pos\", \"y_pos\"]\n if vertical_fast is True: # need to transpose the data, as we scan y first\n # fast scan on y has impact for scaler data\n data_tmp = np.zeros([2, x_pos.shape[1], x_pos.shape[0]])\n data_tmp[1, :, :] = x_pos.T\n data_tmp[0, :, :] = yv.T\n new_data[\"pos_data\"] = data_tmp\n\n else:\n print(\"WARNING: Scan was interrupted: x,y positions are not saved\")\n\n n_detectors_found += 1\n\n if output_to_file:\n # output to file\n print(f\"Saving data to hdf file #{n_detectors_found}: Detector: {detector_name}.\")\n fpath_out = save_data_to_hdf5(\n fpath_out,\n new_data,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n\n # Preparing data for the detector ``detector_name`` for output\n d_dict = {\n \"dataset\": new_data,\n \"file_name\": fpath_out,\n \"detector_name\": detector_name,\n \"metadata\": mdata,\n }\n data_output.append(d_dict)\n\n print()\n if n_detectors_found == 0:\n print(\"ERROR: no data from known detectors were found in the database:\")\n print(\" Check that appropriate fields are included in 'xrf_fly_scan_detector_fields'\")\n print(f\" of configuration file: {config_path}\")\n else:\n print(f\"Total of {n_detectors_found} detectors were found\", end=\"\")\n if output_to_file:\n print(f\", {n_detectors_found} data files were created\", end=\"\")\n print(\".\")\n\n fln_list = [_[\"file_name\"] for _ in data_output]\n logger.debug(f\"Fly scan data was saved to the following files: {fln_list}\")\n\n return data_output\n\n\ndef map_data2D_srx_new(\n run_id_uid,\n fpath,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=True,\n save_scaler=True,\n num_end_lines_excluded=None,\n):\n\n if num_end_lines_excluded:\n logger.warning(\n \"The data loading function for new SRX format does not support the parameter \"\n \"'num_end_lines_excluded' ({num_end_lines_excluded}). All available data will \"\n \"be included in the output file.\"\n )\n\n hdr = db[run_id_uid]\n start_doc = hdr.start\n runid = start_doc[\"scan_id\"] # Replace with the true value (runid may be relative, such as -2)\n\n print(\"**********************************************************\")\n print(f\"Loading scan #{runid}\")\n print(f\"Scan metadata format: version {start_doc['md_version']}\")\n\n if completed_scans_only and not _is_scan_complete(hdr):\n raise Exception(\"Scan is incomplete. Only completed scans are currently processed.\")\n if successful_scans_only and not _is_scan_successful(hdr):\n raise Exception(\n \"Scan is not successfully completed. 
Only successfully completed scans are currently processed.\"\n )\n\n scan_doc = start_doc[\"scan\"]\n stop_doc = hdr.stop\n\n print(f\"Scan type: {scan_doc['type']}\")\n\n # Check for detectors\n dets = []\n try:\n if \"xs\" in hdr.start[\"scan\"][\"detectors\"]:\n dets.append(\"xs\")\n elif \"xs2\" in hdr.start[\"scan\"][\"detectors\"]:\n dets.append(\"xs2\")\n except KeyError:\n # AMK forgot to add detectors to step scans\n # This is fixed, but left in for those scans\n if scan_doc[\"type\"] == \"XRF_STEP\":\n dets.append(\"xs\")\n\n if not (dets):\n raise IOError(\"No detectors found!\")\n\n # Get metadata\n mdata = _extract_metadata_from_header(hdr)\n\n v = _get_metadata_value_from_descriptor_document(hdr, data_key=\"ring_current\", stream_name=\"baseline\")\n if v is not None:\n mdata[\"instrument_beam_current\"] = v\n\n for ax in [\"X\", \"Y\", \"Z\"]:\n v = _get_metadata_all_from_descriptor_document(\n hdr, data_key=f\"nanoKB_interferometer_pos{ax}\", stream_name=\"baseline\"\n )\n if v is not None:\n mdata[f\"param_interferometer_pos{ax}\"] = v\n\n # Get position data from scan\n n_scan_fast, n_scan_slow = hdr.start[\"scan\"][\"shape\"]\n\n # ===================================================================\n # NEW SRX FLY SCAN\n # ===================================================================\n if scan_doc[\"type\"] == \"XRF_FLY\":\n fast_motor = scan_doc[\"fast_axis\"][\"motor_name\"]\n if fast_motor == \"nano_stage_sx\":\n fast_key = \"enc1\"\n elif fast_motor == \"nano_stage_x\":\n fast_key = \"enc1\"\n elif fast_motor == \"nano_stage_sy\":\n fast_key = \"enc2\"\n elif fast_motor == \"nano_stage_y\":\n fast_key = \"enc2\"\n elif fast_motor == \"nano_stage_sz\":\n fast_key = \"enc3\"\n else:\n raise IOError(f\"{fast_motor} not found!\")\n\n slow_motor = scan_doc[\"slow_axis\"][\"motor_name\"]\n if slow_motor == \"nano_stage_sx\":\n slow_key = \"enc1\"\n elif slow_motor == \"nano_stage_x\":\n slow_key = \"enc1\"\n elif slow_motor == \"nano_stage_sy\":\n slow_key = \"enc2\"\n elif slow_motor == \"nano_stage_y\":\n slow_key = \"enc2\"\n elif slow_motor == \"nano_stage_sz\":\n slow_key = \"enc3\"\n else:\n slow_key = slow_motor\n\n # Let's get the data using the events! 
Yay!\n e = hdr.events(\"stream0\", fill=True)\n ep = hdr.events(\"primary\", fill=True)\n d_xs, d_xs_sum, N_xs = [], [], 0\n d_xs2, d_xs2_sum, N_xs2 = [], [], 0\n sclr_list = [\"i0\", \"i0_time\", \"time\", \"im\", \"it\"]\n sclr_dict = {}\n fast_pos, slow_pos = [], []\n\n n_recorded_events = 0\n\n try:\n for m, v in enumerate(e):\n if \"xs\" in dets:\n event_data = v[\"data\"][\"fluor\"]\n N_xs = max(N_xs, event_data.shape[1])\n d_xs_sum.append(np.sum(event_data, axis=1))\n if create_each_det:\n d_xs.append(event_data)\n if \"xs2\" in dets:\n event_data = v[\"data\"][\"fluor_xs2\"]\n N_xs2 = max(N_xs2, event_data.shape[1])\n d_xs2_sum.append(np.sum(event_data, axis=1))\n if create_each_det:\n d_xs2.append(event_data)\n keys = v[\"data\"].keys()\n for s in sclr_list:\n if s in keys:\n tmp = np.array(v[\"data\"][s])\n if s not in sclr_dict:\n sclr_dict[s] = [tmp]\n else:\n sclr_dict[s].append(tmp)\n\n fast_pos.append(np.array(v[\"data\"][fast_key]))\n if \"enc\" not in slow_key:\n vp = next(ep)\n tmp = np.array(vp[\"data\"][slow_key])\n tmp2 = [tmp] * n_scan_fast\n else:\n tmp2 = v[\"data\"][slow_key]\n slow_pos.append(np.array(tmp2))\n\n n_recorded_events = m + 1\n\n if m > 0 and not (m % 10):\n print(f\"Processed lines: {m}\")\n\n except Exception as ex:\n logger.error(f\"Error occurred while reading data: {ex}. Trying to retrieve available data ...\")\n\n def repair_set(dset_list, n_row_pts):\n \"\"\"\n Replaces corrupt rows (incorrect number of points) with closest 'good' row. This allows to load\n and use data from corrupt scans. The function will have no effect on 'good' scans.\n If there are no rows with correct number of points (unlikely case), then the array remains unchanged.\n \"\"\"\n missed_rows = []\n n_last_good_row = -1\n for n in range(len(dset_list)):\n d = dset_list[n]\n n_pts = d.shape[0]\n if n_pts != n_row_pts:\n print(f\"WARNING: Row #{n + 1} has {n_pts} data points. 
{n_row_pts} points are expected.\")\n if n_last_good_row == -1:\n missed_rows.append(n)\n else:\n dset_list[n] = np.array(dset_list[n_last_good_row])\n print(f\"Data in row #{n + 1} is replaced by data from row #{n_last_good_row}\")\n else:\n n_last_good_row = n\n if missed_rows:\n for nr in missed_rows:\n dset_list[nr] = np.array(dset_list[n_last_good_row])\n print(f\"Data in row #{nr + 1} is replaced by data from row #{n_last_good_row}\")\n missed_rows = []\n\n sclr_name = list(sclr_dict.keys())\n\n repair_set(d_xs_sum, n_scan_fast)\n repair_set(d_xs, n_scan_fast)\n repair_set(d_xs2_sum, n_scan_fast)\n repair_set(d_xs2, n_scan_fast)\n repair_set(fast_pos, n_scan_fast)\n repair_set(slow_pos, n_scan_fast)\n for sc in sclr_dict.values():\n repair_set(sc, n_scan_fast)\n\n pos_pos = np.zeros((2, n_recorded_events, n_scan_fast))\n if \"x\" in slow_key:\n pos_pos[1, :, :] = fast_pos\n pos_pos[0, :, :] = slow_pos\n else:\n pos_pos[0, :, :] = fast_pos\n pos_pos[1, :, :] = slow_pos\n pos_name = [\"x_pos\", \"y_pos\"]\n\n if n_recorded_events != n_scan_slow:\n logger.error(\n \"The number of recorded events (%d) is not equal to the expected number of events (%d): \"\n \"The scan is incomplete.\",\n n_recorded_events,\n n_scan_slow,\n )\n\n # The following arrays may be empty if 'create_each_det == False' or the detector is not used.\n d_xs = np.asarray(d_xs)\n d_xs_sum = np.asarray(d_xs_sum)\n d_xs2 = np.asarray(d_xs2)\n d_xs2_sum = np.asarray(d_xs2_sum)\n\n sclr = np.zeros((n_recorded_events, n_scan_fast, len(sclr_name)))\n for n, sname in enumerate(sclr_name):\n sclr[:, :, n] = np.asarray(sclr_dict[sname])\n\n # ===================================================================\n # NEW SRX STEP SCAN\n # ===================================================================\n if scan_doc[\"type\"] == \"XRF_STEP\":\n # Define keys for motor data\n fast_motor = scan_doc[\"fast_axis\"][\"motor_name\"]\n fast_key = fast_motor + \"_user_setpoint\"\n slow_motor = scan_doc[\"slow_axis\"][\"motor_name\"]\n slow_key = slow_motor + \"_user_setpoint\"\n\n # Collect motor positions\n fast_pos = hdr.data(fast_key, stream_name=\"primary\", fill=True)\n fast_pos = np.array(list(fast_pos))\n slow_pos = hdr.data(slow_key, stream_name=\"primary\", fill=True)\n slow_pos = np.array(list(slow_pos))\n\n # Reshape motor positions\n num_events = stop_doc[\"num_events\"][\"primary\"]\n n_scan_slow, n_scan_fast = scan_doc[\"shape\"]\n if num_events != (n_scan_slow * n_scan_fast):\n num_rows = num_events // n_scan_fast + 1 # number of rows\n fast_pos = np.zeros((num_rows, n_scan_fast))\n slow_pos = np.zeros((num_rows, n_scan_fast))\n for i in range(num_rows):\n for j in range(n_scan_fast):\n fast_pos[i, j] = fast_pos[i * n_scan_fast + j]\n slow_pos[i, j] = slow_pos[i * n_scan_fast + j]\n else:\n num_rows = n_scan_slow\n fast_pos = np.reshape(fast_pos, (n_scan_slow, n_scan_fast))\n slow_pos = np.reshape(slow_pos, (n_scan_slow, n_scan_fast))\n\n # Put into one array for h5 file\n pos_pos = np.zeros((2, num_rows, n_scan_fast))\n if \"x\" in slow_key:\n pos_pos[1, :, :] = fast_pos\n pos_pos[0, :, :] = slow_pos\n else:\n pos_pos[0, :, :] = fast_pos\n pos_pos[1, :, :] = slow_pos\n pos_name = [\"x_pos\", \"y_pos\"]\n\n # Get detector data\n keys = hdr.table().keys()\n MAX_DET_ELEMENTS = 8\n N_xs, det_name_prefix = None, None\n for i in np.arange(1, MAX_DET_ELEMENTS + 1):\n if f\"xs_channel{i}\" in keys:\n N_xs, det_name_prefix = i, \"xs_channel\"\n elif f\"xs_channels_channel{i:02d}\" in keys:\n N_xs, det_name_prefix = i, 
\"xs_channels_channel\"\n else:\n break\n N_pts = num_events\n N_bins = 4096\n if \"xs\" in dets:\n d_xs = np.empty((N_xs, N_pts, N_bins))\n for i in np.arange(0, N_xs):\n if det_name_prefix == \"xs_channel\":\n dname = det_name_prefix + f\"{i + 1}\"\n else:\n dname = det_name_prefix + f\"{i + 1:02d}\"\n d = hdr.data(dname, fill=True)\n d = np.array(list(d))\n d_xs[i, :, :] = np.copy(d)\n del d\n # Reshape data\n if num_events != (n_scan_slow * n_scan_fast):\n tmp = np.zeros((N_xs, num_rows, n_scan_fast, N_bins))\n for i in range(num_rows):\n for j in range(n_scan_fast):\n tmp[:, i, j, :] = fast_pos[:, i * n_scan_fast + j, :]\n d_xs = np.copy(tmp)\n del tmp\n else:\n d_xs = np.reshape(d_xs, (N_xs, n_scan_slow, n_scan_fast, N_bins))\n # Sum data\n d_xs_sum = np.squeeze(np.sum(d_xs, axis=0))\n\n # Scaler list\n sclr_list = [\"sclr_i0\", \"sclr_im\", \"sclr_it\"]\n sclr_name = []\n for s in sclr_list:\n if s in keys:\n sclr_name.append(s)\n sclr = np.array(hdr.table()[sclr_name].values)\n # Reshape data\n if num_events != (n_scan_slow * n_scan_fast):\n tmp = np.zeros((num_rows, n_scan_fast))\n for i in range(num_rows):\n for j in range(n_scan_fast):\n tmp[i, j] = fast_pos[i * n_scan_fast + j]\n sclr = np.copy(tmp)\n del tmp\n else:\n sclr = np.reshape(sclr, (n_scan_slow, n_scan_fast, len(sclr_name)))\n\n # Consider snake\n # pos_pos, d_xs, d_xs_sum, sclr\n if scan_doc[\"snake\"] == 1:\n pos_pos[:, 1::2, :] = pos_pos[:, 1::2, ::-1]\n if \"xs\" in dets:\n if d_xs.size:\n d_xs[:, 1::2, :, :] = d_xs[:, 1::2, ::-1, :]\n if d_xs_sum.size:\n d_xs_sum[1::2, :, :] = d_xs_sum[1::2, ::-1, :]\n if \"xs2\" in dets:\n if d_xs2.size:\n d_xs2[:, 1::2, :, :] = d_xs2[:, 1::2, ::-1, :]\n if d_xs2_sum.size:\n d_xs2_sum[1::2, :, :] = d_xs2_sum[1::2, ::-1, :]\n sclr[1::2, :, :] = sclr[1::2, ::-1, :]\n\n def swap_axes():\n nonlocal pos_name, pos_pos, d_xs, d_xs_sum, d_xs2, d_xs2_sum, sclr\n # Need to swapaxes on pos_pos, d_xs, d_xs_sum, sclr\n pos_name = pos_name[::-1]\n pos_pos = np.swapaxes(pos_pos, 1, 2)\n if \"xs\" in dets:\n if d_xs.size:\n d_xs = np.swapaxes(d_xs, 0, 1)\n if d_xs_sum.size:\n d_xs_sum = np.swapaxes(d_xs_sum, 0, 1)\n if \"xs2\" in dets:\n if d_xs2.size:\n d_xs2 = np.swapaxes(d_xs2, 0, 1)\n if d_xs2_sum.size:\n d_xs2_sum = np.swapaxes(d_xs2_sum, 0, 1)\n sclr = np.swapaxes(sclr, 0, 1)\n\n if scan_doc[\"type\"] == \"XRF_FLY\":\n if fast_motor in (\"nano_stage_sy\", \"nano_stage_y\"):\n swap_axes()\n elif scan_doc[\"type\"] == \"XRF_STEP\":\n if \"xs\" in dets:\n d_xs = np.swapaxes(d_xs, 0, 1)\n d_xs = np.swapaxes(d_xs, 1, 2)\n if \"xs2\" in dets:\n d_xs2 = np.swapaxes(d_xs2, 0, 1)\n d_xs2 = np.swapaxes(d_xs2, 1, 2)\n if fast_motor not in (\"nano_stage_sy\", \"nano_stage_y\"):\n swap_axes()\n pos_name = pos_name[::-1] # Swap the positions back\n else:\n pos_name = pos_name[::-1] # Swap the positions back\n\n print(\"Data is loaded successfully. 
Preparing to save data ...\")\n\n data_output = []\n\n for detector_name in dets:\n if detector_name == \"xs\":\n tmp_data = d_xs\n tmp_data_sum = d_xs_sum\n num_det = N_xs\n elif detector_name == \"xs2\":\n tmp_data = d_xs2\n tmp_data_sum = d_xs2_sum\n num_det = N_xs2\n\n loaded_data = {}\n loaded_data[\"det_sum\"] = tmp_data_sum\n if create_each_det:\n for i in range(num_det):\n loaded_data[\"det\" + str(i + 1)] = np.squeeze(tmp_data[:, :, i, :])\n\n if save_scaler:\n loaded_data[\"scaler_data\"] = sclr\n loaded_data[\"scaler_names\"] = sclr_name\n\n loaded_data[\"pos_data\"] = pos_pos\n loaded_data[\"pos_names\"] = pos_name\n\n # Generate the default file name for the scan\n if fpath is None:\n fpath = f\"scan2D_{runid}.h5\"\n\n # Modify file name (path) to include data on how many channels are included in the file and how many\n # channels are used for sum calculation\n root, ext = os.path.splitext(fpath)\n s = f\"_{detector_name}_sum{num_det}ch\"\n if create_each_det:\n s += f\"+{num_det}ch\"\n fpath_out = f\"{root}{s}{ext}\"\n\n if output_to_file:\n # output to file\n print(f\"Saving data to hdf file '{fpath_out}': Detector: {detector_name}.\")\n fpath_out = save_data_to_hdf5(\n fpath_out,\n loaded_data,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n\n d_dict = {\n \"dataset\": loaded_data,\n \"file_name\": fpath_out,\n \"detector_name\": detector_name,\n \"metadata\": mdata,\n }\n data_output.append(d_dict)\n\n return data_output\n\n\ndef map_data2D_tes(\n run_id_uid,\n fpath,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=True,\n save_scaler=True,\n):\n \"\"\"\n Transfer the data from databroker into a correct format following the\n shape of 2D scan.\n This function is used at TES beamline for step scan.\n Save the new data dictionary to hdf5 file if needed.\n\n .. note::\n\n It is recommended to read data from databroker into memory\n directly, instead of saving to files. This is ongoing work.\n\n Parameters\n ----------\n run_id_uid : int\n ID or UID of a run\n fpath: str\n path to save hdf file\n create_each_det: bool, optional\n Do not create data for each detector if data size is too large,\n if set as False. This will slow down the speed of creating an hdf5 file\n with large data size.\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails.\n completed_scans_only : bool\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. If incomplete scan is\n encountered: an exception is thrown.\n False: the feature is disabled, incomplete scan will be processed.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. 
Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. If the file already exists, then the exception\n is raised.\n output_to_file : bool, optional\n save data to hdf5 file if True\n\n Returns\n -------\n dict of data in 2D format matching x,y scanning positions\n \"\"\"\n\n hdr = db[run_id_uid]\n runid = hdr.start[\"scan_id\"] # Replace with the true value (runid may be relative, such as -2)\n\n # The dictionary holding scan metadata\n mdata = _extract_metadata_from_header(hdr)\n # Some metadata is located at specific places in the descriptor documents\n # Search through the descriptor documents for the metadata\n v = _get_metadata_value_from_descriptor_document(hdr, data_key=\"mono_energy\", stream_name=\"baseline\")\n # Incident energy in the descriptor document is expected to be more accurate, so\n # overwrite the value if it already exists\n if v is not None:\n mdata[\"instrument_mono_incident_energy\"] = v / 1000.0 # eV to keV\n\n if completed_scans_only and not _is_scan_complete(hdr):\n raise Exception(\"Scan is incomplete. Only completed scans are currently processed.\")\n if successful_scans_only and not _is_scan_successful(hdr):\n raise Exception(\n \"Scan is not successfully completed. Only successfully completed scans are currently processed.\"\n )\n\n # Generate the default file name for the scan\n if fpath is None:\n fpath = f\"scan2D_{runid}.h5\"\n\n # Load configuration file\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = \"tes_pv_config.json\"\n config_path = sep_v.join(current_dir.split(sep_v)[:-2] + [\"configs\", config_file])\n with open(config_path, \"r\") as json_data:\n config_data = json.load(json_data)\n\n # NOTE:\n # Currently implemented algorithm will work only with the following flyscan:\n # flyscanning along X-axis, stepping along Y-axis (to do otherwise or support both cases\n # the function has to be modified).\n # Each document will contain full data for a single line of N-point flyscan:\n # N-element arrays with values for X and Y axis\n # N-element arrays with values for each scaler\n # N fluorescent spectra (each spectrum is 4096 points, saved by Xspress3 into\n # separate file on GPFS, the document contains the path to file)\n\n print()\n print(\"****************************************\")\n print(\" Loading TES fly scan \")\n print(\"****************************************\")\n\n xpos_name = \"x_centers\" # For now, we always fly on stage_x (fast axis)\n ypos_name = \"y_centers\"\n\n # The dictionary of fields that are used to store data from different detectors (for fly scan only)\n # key - the name of the field used to store data read from the detector\n # value - the detector name (probably short abbreviation, attached to the created file name so that\n # the detector could be identified)\n # A separate data file is created for each detector\n\n # The following list will be used if the function is modified to work with multiple detectors\n # detector_field_dict = config_data['xrf_flyscan_detector_fields']\n\n spectrum_len = 4096 # It is typically fixed\n\n # Output data is the list of data structures for all available detectors\n data_output = []\n\n # The dictionary that will contain the data extracted from scan data\n # This data will be saved to file and/or loaded into processing software\n new_data = {}\n\n def _is_row_missing(row_data):\n \"\"\"\n Determine if the row is missing. 
Different versions of Databroker will return differnent value types.\n \"\"\"\n if row_data is None:\n return True\n elif isinstance(row_data, np.ndarray) and (row_data.size == 1) and (row_data == np.array(None)):\n # This is returned by databroker.v0\n return True\n elif not len(row_data):\n return True\n else:\n return False\n\n def _get_row_len(row_data):\n if _is_row_missing(row_data):\n return 0\n else:\n return len(row_data)\n\n # Typically the scalers are saved\n if save_scaler is True:\n # Read the scalers\n scaler_names = config_data[\"scaler_list\"]\n\n # Save all scaler names using lowercase letters\n scaler_names_lower = scaler_names.copy()\n for n in range(len(scaler_names)):\n scaler_names_lower[n] = scaler_names_lower[n].lower()\n new_data[\"scaler_names\"] = scaler_names_lower\n\n n_scalers = len(config_data[\"scaler_list\"])\n scaler_data = None\n data_shape = None\n for n, name in enumerate(scaler_names):\n s_data = hdr.table()[name]\n # Convert pandas dataframe to a list of ndarrays (.to_numpy())\n # and then stack the arrays into a single 2D array\n s_data = s_data.to_numpy()\n\n # Find maximum number of points in a row.\n n_max_points = -1 # Maximum number of points in the row\n for row_data in s_data:\n n_max_points = max(n_max_points, _get_row_len(row_data))\n\n # Fix for the issue: 'empty' rows in scaler data. Fill 'empty' row\n # with the nearest (preceding) row.\n # TODO: investigate the issue of 'empty' scaler ('dwell_time') rows at TES\n n_full = -1\n\n for _n in range(len(s_data)):\n if _is_row_missing(s_data[_n]) or (len(s_data[_n]) < n_max_points):\n n_full = _n\n break\n for _n in range(len(s_data)):\n # Data for the missing row is replaced by data from the previous 'good' row\n if _is_row_missing(s_data[_n]) or (len(s_data[_n]) < n_max_points):\n s_data[_n] = np.copy(s_data[n_full])\n logger.error(\n f\"Scaler '{name}': row #{_n} is corrupt or contains no data. \"\n f\"Replaced by data from row #{n_full}\"\n )\n else:\n n_full = _n\n\n s_data = np.vstack(s_data)\n if scaler_data is None:\n data_shape = s_data.shape\n scaler_data = np.zeros(shape=data_shape + (n_scalers,), dtype=float)\n scaler_data[:, :, n] = s_data\n new_data[\"scaler_data\"] = scaler_data\n\n # Read x-y coordinates\n new_data[\"pos_names\"] = [\"x_pos\", \"y_pos\"]\n pos_data = np.zeros(shape=(2,) + data_shape, dtype=float)\n # Convert pandas dataframes to 2D ndarrays\n pos_data[0, :, :] = np.vstack(hdr.table()[xpos_name].to_numpy())\n pos_data[1, :, :] = np.vstack(hdr.table()[ypos_name].to_numpy())\n new_data[\"pos_data\"] = pos_data\n\n detector_field = \"fluor\"\n\n # Read detector values (for single detector)\n detector_data = np.zeros(shape=data_shape + (spectrum_len,), dtype=np.float32)\n n_events = data_shape[0]\n n_events_found = 0\n e = hdr.events(fill=True, stream_name=\"primary\")\n\n n_pt_max = -1\n try:\n for n, v in enumerate(e):\n if n >= n_events:\n print(\"The number of lines is less than expected\")\n break\n data = v.data[detector_field]\n data_det1 = np.array(data[:, 0, :], dtype=np.float32)\n\n # The following is the fix for the case when data has corrupt row (smaller number of data points).\n # It will not work if the first row is corrupt.\n n_pt_max = max(data_det1.shape[0], n_pt_max)\n data_det1_adjusted = np.zeros([n_pt_max, data_det1.shape[1]])\n data_det1_adjusted[: data_det1.shape[0], :] = data_det1\n\n detector_data[n, :, :] = data_det1_adjusted\n n_events_found = n + 1\n except Exception as ex:\n logger.error(f\"Error occurred while reading data: {ex}. 
Trying to retrieve available data ...\")\n\n if n_events_found < n_events:\n print(\"The number of lines is less than expected. The experiment may be incomplete\")\n\n if n_events_found != n_events:\n # This will happen if data is corrupt, for example the experiment is interrupted prematurely.\n n_events_min = min(n_events_found, n_events)\n print(f\"The map is resized: data for only {n_events_min} rows is available\")\n detector_data = detector_data[:n_events_min, :, :]\n new_data[\"scaler_data\"] = new_data[\"scaler_data\"][:n_events_min, :, :]\n new_data[\"pos_data\"] = new_data[\"pos_data\"][:, :n_events_min, :]\n\n # Note: the following code assumes that the detector has only one channel.\n # If the detector is upgraded, the following code will have to be rewritten, but\n # the rest of the data loading procedure will have to be modified anyway.\n if create_each_det:\n new_data[\"det1\"] = detector_data\n else:\n new_data[\"det_sum\"] = detector_data\n\n num_det = 1\n detector_name = \"xs\"\n n_detectors_found = 1\n\n # Modify file name (path) to include data on how many channels are included in the file and how many\n # channels are used for sum calculation\n root, ext = os.path.splitext(fpath)\n s = f\"_{detector_name}_sum{num_det}ch\"\n if create_each_det:\n s += f\"+{num_det}ch\"\n fpath_out = f\"{root}{s}{ext}\"\n\n if output_to_file:\n # output to file\n print(f\"Saving data to hdf file #{n_detectors_found}: Detector: {detector_name}.\")\n fpath_out = save_data_to_hdf5(\n fpath_out,\n new_data,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n\n d_dict = {\"dataset\": new_data, \"file_name\": fpath_out, \"detector_name\": detector_name, \"metadata\": mdata}\n data_output.append(d_dict)\n\n return data_output\n\n\ndef map_data2D_xfm(\n run_id_uid,\n fpath,\n create_each_det=False,\n fname_add_version=False,\n completed_scans_only=False,\n successful_scans_only=False,\n file_overwrite_existing=False,\n output_to_file=True,\n):\n \"\"\"\n Transfer the data from databroker into a correct format following the\n shape of 2D scan.\n This function is used at XFM beamline for step scan.\n Save the new data dictionary to hdf file if needed.\n\n .. note:: It is recommended to read data from databroker into memory\n directly, instead of saving to files. This is ongoing work.\n\n Parameters\n ----------\n run_id_uid : int\n ID or UID of a run\n fpath: str\n path to save hdf file\n create_each_det: bool, optional\n Do not create data for each detector is data size is too large,\n if set as false. This will slow down the speed of creating hdf file\n with large data size.\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: then conversion fails.\n completed_scans_only : bool\n True: process only completed scans (for which ``stop`` document exists in\n the database). Failed scan for which ``stop`` document exists are considered\n completed even if not the whole image was scanned. If incomplete scan is\n encountered: an exception is thrown.\n False: the feature is disabled, incomplete scan will be processed.\n file_overwrite_existing : bool, keyword parameter\n This option should be used if the existing file should be deleted and replaced\n with the new file with the same name. 
This option should be used with caution,\n since the existing file may contain processed data, which will be permanently deleted.\n True: overwrite existing files if needed. Note, that if ``fname_add_version`` is ``True``,\n then new versions of the existing file will always be created.\n False: do not overwrite existing files. If the file already exists, then the exception\n is raised.\n output_to_file : bool, optional\n save data to hdf5 file if True\n\n Returns\n -------\n dict of data in 2D format matching x,y scanning positions\n \"\"\"\n hdr = db[run_id_uid]\n runid = hdr.start[\"scan_id\"] # Replace with the true value (runid may be relative, such as -2)\n\n if completed_scans_only and not _is_scan_complete(hdr):\n raise Exception(\"Scan is incomplete. Only completed scans are currently processed.\")\n if successful_scans_only and not _is_scan_successful(hdr):\n raise Exception(\n \"Scan is not successfully completed. Only successfully completed scans are currently processed.\"\n )\n\n # Generate the default file name for the scan\n if fpath is None:\n fpath = f\"scan2D_{runid}.h5\"\n\n # Output data is the list of data structures for all available detectors\n data_output = []\n\n # spectrum_len = 4096\n start_doc = hdr[\"start\"]\n # The dictionary holding scan metadata\n mdata = _extract_metadata_from_header(hdr)\n plan_n = start_doc.get(\"plan_name\")\n if \"fly\" not in plan_n: # not fly scan\n datashape = start_doc[\"shape\"] # vertical first then horizontal\n fly_type = None\n\n snake_scan = start_doc.get(\"snaking\")\n if snake_scan[1] is True:\n fly_type = \"pyramid\"\n\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file = \"xfm_pv_config.json\"\n config_path = sep_v.join(current_dir.split(sep_v)[:-2] + [\"configs\", config_file])\n with open(config_path, \"r\") as json_data:\n config_data = json.load(json_data)\n\n # try except can be added later if scan is not completed.\n data = db.get_table(hdr, fill=True, convert_times=False)\n\n xrf_detector_names = config_data[\"xrf_detector\"]\n data_out = map_data2D(\n data,\n datashape,\n det_list=xrf_detector_names,\n pos_list=hdr.start.motors,\n create_each_det=create_each_det,\n scaler_list=config_data[\"scaler_list\"],\n fly_type=fly_type,\n )\n\n fpath_out = fpath\n\n if output_to_file:\n print(\"Saving data to hdf file.\")\n fpath_out = save_data_to_hdf5(\n fpath_out,\n data_out,\n metadata=mdata,\n fname_add_version=fname_add_version,\n file_overwrite_existing=file_overwrite_existing,\n create_each_det=create_each_det,\n )\n\n detector_name = \"xs\"\n d_dict = {\"dataset\": data_out, \"file_name\": fpath_out, \"detector_name\": detector_name, \"metadata\": mdata}\n data_output.append(d_dict)\n\n return data_output\n\n\ndef write_db_to_hdf(\n fpath,\n data,\n datashape,\n det_list=(\"xspress3_ch1\", \"xspress3_ch2\", \"xspress3_ch3\"),\n pos_list=(\"zpssx[um]\", \"zpssy[um]\"),\n scaler_list=(\"sclr1_ch3\", \"sclr1_ch4\"),\n fname_add_version=False,\n fly_type=None,\n subscan_dims=None,\n base_val=None,\n):\n \"\"\"\n Assume data is obained from databroker, and save the data to hdf file.\n This function can handle stopped/aborted scans.\n\n .. 
note:: This function should become part of suitcase\n\n Parameters\n ----------\n fpath: str\n path to save hdf file\n data : pandas.core.frame.DataFrame\n data from data broker\n datashape : tuple or list\n shape of two D image\n det_list : list, tuple, optional\n list of detector channels\n pos_list : list, tuple, optional\n list of pos pv\n scaler_list : list, tuple, optional\n list of scaler pv\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: the exception is thrown if the file exists.\n \"\"\"\n interpath = \"xrfmap\"\n\n if os.path.exists(fpath):\n if fname_add_version:\n fpath = _get_fpath_not_existing(fpath)\n else:\n raise IOError(f\"'write_db_to_hdf': File '{fpath}' already exists.\")\n\n with h5py.File(fpath, \"a\") as f:\n\n sum_data = None\n new_v_shape = datashape[0] # to be updated if scan is not completed\n spectrum_len = 4096 # standard\n\n for n, c_name in enumerate(det_list):\n if c_name in data:\n detname = \"det\" + str(n + 1)\n dataGrp = f.create_group(interpath + \"/\" + detname)\n\n logger.info(\"read data from %s\" % c_name)\n channel_data = data[c_name]\n\n # new veritcal shape is defined to ignore zeros points caused by stopped/aborted scans\n new_v_shape = len(channel_data) // datashape[1]\n\n new_data = np.vstack(channel_data)\n new_data = new_data[: new_v_shape * datashape[1], :]\n\n new_data = new_data.reshape([new_v_shape, datashape[1], len(channel_data[1])])\n if new_data.shape[2] != spectrum_len:\n # merlin detector has spectrum len 2048\n # make all the spectrum len to 4096, to avoid unpredicted error in fitting part\n new_tmp = np.zeros([new_data.shape[0], new_data.shape[1], spectrum_len])\n new_tmp[:, :, : new_data.shape[2]] = new_data\n new_data = new_tmp\n if fly_type in (\"pyramid\",):\n new_data = flip_data(new_data, subscan_dims=subscan_dims)\n\n if sum_data is None:\n sum_data = np.copy(new_data)\n else:\n sum_data += new_data\n ds_data = dataGrp.create_dataset(\"counts\", data=new_data, compression=\"gzip\")\n ds_data.attrs[\"comments\"] = \"Experimental data from channel \" + str(n)\n\n # summed data\n dataGrp = f.create_group(interpath + \"/detsum\")\n\n if sum_data is not None:\n sum_data = sum_data.reshape([new_v_shape, datashape[1], spectrum_len])\n ds_data = dataGrp.create_dataset(\"counts\", data=sum_data, compression=\"gzip\")\n ds_data.attrs[\"comments\"] = \"Experimental data from channel sum\"\n\n # position data\n dataGrp = f.create_group(interpath + \"/positions\")\n\n pos_names, pos_data = get_name_value_from_db(pos_list, data, datashape)\n\n for i in range(len(pos_names)):\n if \"x\" in pos_names[i]:\n pos_names[i] = \"x_pos\"\n elif \"y\" in pos_names[i]:\n pos_names[i] = \"y_pos\"\n if \"x_pos\" not in pos_names or \"y_pos\" not in pos_names:\n pos_names = [\"x_pos\", \"y_pos\"]\n\n # need to change shape to sth like [2, 100, 100]\n data_temp = np.zeros([pos_data.shape[2], pos_data.shape[0], pos_data.shape[1]])\n for i in range(pos_data.shape[2]):\n data_temp[i, :, :] = pos_data[:, :, i]\n\n if fly_type in (\"pyramid\",):\n for i in range(data_temp.shape[0]):\n # flip position the same as data flip on det counts\n data_temp[i, :, :] = flip_data(data_temp[i, :, :], subscan_dims=subscan_dims)\n\n dataGrp.create_dataset(\"name\", data=helper_encode_list(pos_names))\n dataGrp.create_dataset(\"pos\", data=data_temp[:, :new_v_shape, 
:])\n\n # scaler data\n dataGrp = f.create_group(interpath + \"/scalers\")\n\n scaler_names, scaler_data = get_name_value_from_db(scaler_list, data, datashape)\n\n if fly_type in (\"pyramid\",):\n scaler_data = flip_data(scaler_data, subscan_dims=subscan_dims)\n\n dataGrp.create_dataset(\"name\", data=helper_encode_list(scaler_names))\n\n if base_val is not None: # base line shift for detector, for SRX\n base_val = np.array([base_val])\n if len(base_val) == 1:\n scaler_data = np.abs(scaler_data - base_val)\n else:\n for i in scaler_data.shape[2]:\n scaler_data[:, :, i] = np.abs(scaler_data[:, :, i] - base_val[i])\n\n dataGrp.create_dataset(\"val\", data=scaler_data[:new_v_shape, :])\n\n return fpath\n\n\ndef assemble_data_SRX_stepscan(\n data,\n datashape,\n det_list=(\"xspress3_ch1\", \"xspress3_ch2\", \"xspress3_ch3\"),\n pos_list=(\"zpssx[um]\", \"zpssy[um]\"),\n scaler_list=(\"sclr1_ch3\", \"sclr1_ch4\"),\n fname_add_version=False,\n create_each_det=True,\n fly_type=None,\n subscan_dims=None,\n base_val=None,\n):\n \"\"\"\n Convert stepscan data from SRX beamline obtained from databroker into the for accepted\n by ``write_db_to_hdf_base`` function.\n This function can handle stopped/aborted scans.\n\n Parameters\n ----------\n data : pandas.core.frame.DataFrame\n data from data broker\n datashape : tuple or list\n shape of two D image\n det_list : list, tuple, optional\n list of detector channels\n pos_list : list, tuple, optional\n list of pos pv\n scaler_list : list, tuple, optional\n list of scaler pv\n fname_add_version : bool\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_(1).h5, <fname>_(2).h5, etc.\n False: the exception is thrown if the file exists.\n create_each_det: bool\n True: output dataset contains data for individual detectors, False: output\n dataset contains only sum of all detectors.\n \"\"\"\n\n data_assembled = {}\n\n sum_data = None\n new_v_shape = datashape[0] # to be updated if scan is not completed\n spectrum_len = 4096 # standard\n\n for n, c_name in enumerate(det_list):\n if c_name in data:\n detname = \"det\" + str(n + 1)\n channel_data = data[c_name]\n\n # new veritcal shape is defined to ignore zeros points caused by stopped/aborted scans\n new_v_shape = len(channel_data) // datashape[1]\n\n new_data = np.vstack(channel_data)\n new_data = new_data.astype(np.float32, copy=False) # Change representation to np.float32\n new_data = new_data[: new_v_shape * datashape[1], :]\n\n new_data = new_data.reshape([new_v_shape, datashape[1], len(channel_data[1])])\n if new_data.shape[2] != spectrum_len:\n # merlin detector has spectrum len 2048\n # make all the spectrum len to 4096, to avoid unpredicted error in fitting part\n new_tmp = np.zeros([new_data.shape[0], new_data.shape[1], spectrum_len])\n new_tmp[:, :, : new_data.shape[2]] = new_data\n new_data = new_tmp\n if fly_type in (\"pyramid\",):\n new_data = flip_data(new_data, subscan_dims=subscan_dims)\n\n if sum_data is None:\n sum_data = np.copy(new_data)\n else:\n sum_data += new_data\n\n if create_each_det:\n data_assembled[detname] = new_data\n\n if sum_data is not None:\n data_assembled[\"det_sum\"] = sum_data\n\n # position data\n pos_names, pos_data = get_name_value_from_db(pos_list, data, datashape)\n\n # I don't have knowledge of all possible scenarios to change the following algorithm for\n # naming 'x_pos' and 'y_pos'. 
It definitely covers the basic cases of having x and y axis.\n # It will also produce good dataset if the naming is inconsistent.\n for i in range(len(pos_names)):\n if \"x\" in pos_names[i]:\n pos_names[i] = \"x_pos\"\n elif \"y\" in pos_names[i]:\n pos_names[i] = \"y_pos\"\n if \"x_pos\" not in pos_names or \"y_pos\" not in pos_names:\n pos_names = [\"x_pos\", \"y_pos\"]\n\n # need to change shape to sth like [2, 100, 100]\n n_pos = min(pos_data.shape[2], len(pos_names))\n data_temp = np.zeros([n_pos, pos_data.shape[0], pos_data.shape[1]])\n\n for i in range(n_pos):\n data_temp[i, :, :] = pos_data[:, :, i]\n\n if fly_type in (\"pyramid\",):\n for i in range(data_temp.shape[0]):\n # flip position the same as data flip on det counts\n data_temp[i, :, :] = flip_data(data_temp[i, :, :], subscan_dims=subscan_dims)\n\n data_assembled[\"pos_names\"] = pos_names\n data_assembled[\"pos_data\"] = data_temp[:, :new_v_shape, :]\n\n # scaler data\n scaler_names, scaler_data = get_name_value_from_db(scaler_list, data, datashape)\n\n if fly_type in (\"pyramid\",):\n scaler_data = flip_data(scaler_data, subscan_dims=subscan_dims)\n\n if base_val is not None: # base line shift for detector, for SRX\n base_val = np.array([base_val])\n if len(base_val) == 1:\n scaler_data = np.abs(scaler_data - base_val)\n else:\n for i in scaler_data.shape[2]:\n scaler_data[:, :, i] = np.abs(scaler_data[:, :, i] - base_val[i])\n\n data_assembled[\"scaler_names\"] = scaler_names\n data_assembled[\"scaler_data\"] = scaler_data[:new_v_shape, :]\n\n return data_assembled\n\n\ndef get_name_value_from_db(name_list, data, datashape):\n \"\"\"\n Get name and data from db.\n \"\"\"\n pos_names = []\n pos_data = np.zeros([datashape[0], datashape[1], len(name_list)])\n for i, v in enumerate(name_list):\n posv = np.zeros(\n datashape[0] * datashape[1]\n ) # keep shape unchanged, so stopped/aborted run can be handled.\n data[v] = np.asarray(data[v]) # in case data might be list\n posv[: data[v].shape[0]] = np.asarray(data[v])\n pos_data[:, :, i] = posv.reshape([datashape[0], datashape[1]])\n pos_names.append(str(v))\n return pos_names, pos_data\n\n\ndef map_data2D(\n data,\n datashape,\n det_list=(\"xspress3_ch1\", \"xspress3_ch2\", \"xspress3_ch3\"),\n pos_list=(\"zpssx[um]\", \"zpssy[um]\"),\n scaler_list=(\"sclr1_ch3\", \"sclr1_ch4\"),\n create_each_det=False,\n fly_type=None,\n subscan_dims=None,\n spectrum_len=4096,\n):\n \"\"\"\n Data is obained from databroker. Transfer items from data to a dictionary of\n numpy array, which has 2D shape same as scanning area.\n\n This function can handle stopped/aborted scans. 
Raster scan (snake scan) is\n also considered.\n\n Parameters\n ----------\n data : pandas.core.frame.DataFrame\n data from data broker\n datashape : tuple or list\n shape of two D image\n det_list : list, tuple, optional\n list of detector channels\n pos_list : list, tuple, optional\n list of pos pv\n scaler_list : list, tuple, optional\n list of scaler pv\n fly_type : string or optional\n raster scan (snake scan) or normal\n subscan_dims : 1D array or optional\n used at HXN, 2D of a large area is split into small area scans\n spectrum_len : int, optional\n standard spectrum length\n\n Returns\n -------\n dict of numpy array\n \"\"\"\n data_output = {}\n new_v_shape = datashape[0] # updated if scan is not completed\n sum_data = None\n\n for n, c_name in enumerate(det_list):\n if c_name in data:\n detname = \"det\" + str(n + 1)\n logger.info(\"read data from %s\" % c_name)\n channel_data = data[c_name]\n\n # new veritcal shape is defined to ignore zeros points caused by stopped/aborted scans\n new_v_shape = len(channel_data) // datashape[1]\n new_data = np.vstack(channel_data)\n new_data = new_data.astype(np.float32, copy=False) # Change representation to np.float32\n new_data = new_data[: new_v_shape * datashape[1], :]\n new_data = new_data.reshape([new_v_shape, datashape[1], len(channel_data[1])])\n if new_data.shape[2] != spectrum_len:\n # merlin detector has spectrum len 2048\n # make all the spectrum len to 4096, to avoid unpredicted error in fitting part\n new_tmp = np.zeros([new_data.shape[0], new_data.shape[1], spectrum_len], dtype=np.float32)\n new_tmp[:, :, : new_data.shape[2]] = new_data\n new_data = new_tmp\n if fly_type in (\"pyramid\",):\n new_data = flip_data(new_data, subscan_dims=subscan_dims)\n if create_each_det:\n data_output[detname] = new_data\n if sum_data is None:\n # Note: Here is the place where the error was found!!!\n # The assignment in the next line used to be written as\n # sum_data = new_data\n # i.e. reference to data from 'det1' was assigned to 'sum_data'.\n # After computation of the sum, both 'sum_data' and detector 'det1'\n # were referencing the same ndarray, holding the sum of values\n # from detector channels 'det1', 'det2' and 'det3'. 
In addition, the sum is\n # computed again before data is saved into '.h5' file.\n # The algorithm for computing of the second sum is working correctly,\n # but since 'det1' already contains the true sum 'det1'+'det2'+'det3',\n # the computed sum equals 'det1'+2*'det2'+2*'det3'.\n # The problem was fixed by replacing assignment of reference during\n # initalization of 'sum_data' by copying the array.\n # The error is documented because the code was used for a long time\n # for initial processing of XRF imaging data at HXN beamline.\n sum_data = np.copy(new_data)\n else:\n sum_data += new_data\n data_output[\"det_sum\"] = sum_data\n\n # scanning position data\n pos_names, pos_data = get_name_value_from_db(pos_list, data, datashape)\n for i in range(len(pos_names)):\n if \"x\" in pos_names[i]:\n pos_names[i] = \"x_pos\"\n elif \"y\" in pos_names[i]:\n pos_names[i] = \"y_pos\"\n if \"x_pos\" not in pos_names or \"y_pos\" not in pos_names:\n pos_names = [\"x_pos\", \"y_pos\"]\n\n if fly_type in (\"pyramid\",):\n for i in range(pos_data.shape[2]):\n # flip position the same as data flip on det counts\n pos_data[:, :, i] = flip_data(pos_data[:, :, i], subscan_dims=subscan_dims)\n new_p = np.zeros([len(pos_names), pos_data.shape[0], pos_data.shape[1]])\n for i in range(len(pos_names)):\n new_p[i, :, :] = pos_data[:, :, i]\n data_output[\"pos_names\"] = pos_names\n data_output[\"pos_data\"] = new_p\n\n # scaler data\n scaler_names, scaler_data = get_name_value_from_db(scaler_list, data, datashape)\n if fly_type in (\"pyramid\",):\n scaler_data = flip_data(scaler_data, subscan_dims=subscan_dims)\n data_output[\"scaler_names\"] = scaler_names\n data_output[\"scaler_data\"] = scaler_data\n return data_output\n\n\ndef _get_fpath_not_existing(fpath):\n # Returns path to the new file that is guaranteed to not exist\n # The function cycles through paths obtained by inserting\n # version number between name and extension in the prototype path ``fpath``\n # The version number is inserted in the form ``filename_v2.ext``\n\n if os.path.exists(fpath):\n p, e = os.path.splitext(fpath)\n n = 1\n while True:\n fpath = f\"{p}_v{n}{e}\"\n if not os.path.exists(fpath):\n break\n n += 1\n return fpath\n\n\ndef save_data_to_hdf5(\n fpath, data, *, metadata=None, fname_add_version=False, file_overwrite_existing=False, create_each_det=True\n):\n \"\"\"\n This is the function used to save raw experiment data into HDF5 file. The raw data is\n represented as a dictionary with the following keys:\n\n keys 'det1', 'det2' etc. - 3D ndarrays of size (N, M, K) where NxM are dimensions of the map\n and K is the number of spectrum points (4096) contain data from the detector channels 1, 2, 3 etc.\n\n key 'det_sum' - 3D ndarray with the same dimensions as 'det1' contains the sum of the channels\n\n key 'scaler_names' - the list of scaler names\n\n key 'scaler_data' - 3D ndarray of scaler values. The array shape is (N, M, P), where P is\n the number of scaler names.\n\n key 'pos_names' - the list of position (axis) names, must contain the names 'x_pos' and 'y_pos'\n in correct order.\n\n key 'pos_data' - 3D ndarray with position values. The array must have size (2, N, M). The first\n index is the number of the position name 'pos_names' list.\n\n Parameters\n ----------\n fpath: str\n Full path to the HDF5 file. The function creates an new HDF5 file. 
If file already exists\n and ``file_overwrite_existing=False``, then the IOError exception is raised.\n data : dict\n The dictionary of raw data.\n metadata : dict\n Metadata to be saved in the HDF5 file. The function will add or overwrite the existing\n metadata fields: ``file_type``, ``file_format``, ``file_format_version``, ``file_created_time``.\n User may define metadata fields ``file_software`` and ``file_software_version``. If ``file_software``\n is not defined, then the default values for ``file_software`` and ``file_software_version`` are added.\n fname_add_version : boolean\n True: if file already exists, then file version is added to the file name\n so that it becomes unique in the current directory. The version is\n added to <fname>.h5 in the form <fname>_v1.h5, <fname>_v2.h5, etc.\n False: the exception is raised if the file exists.\n file_overwrite_existing : boolean\n Overwrite the existing file or raise exception if the file exists.\n create_each_det : boolean\n Save data from individual detectors (``True``) or only the sum of fluorescence from\n all detectors (``False``).\n\n Raises\n ------\n IOError\n Failed to write data to HDF5 file.\n \"\"\"\n\n fpath = os.path.expanduser(fpath)\n fpath = os.path.abspath(fpath)\n\n data = data.copy() # Must be a shallow copy (avoid creating copies of data arrays)\n metadata = copy.deepcopy(metadata) # Create deep copy (metadata is modified)\n\n interpath = \"xrfmap\"\n sum_data, sum_data_exists = None, False\n xrf_det_list = [n for n in data.keys() if \"det\" in n and \"sum\" not in n]\n xrf_det_list.sort()\n\n # Verify that raw fluorescence data is represented with np.float32 precision: print the warning message\n # and convert the raw spectrum data to np.float32. Assume that data is represented as ndarray.\n def incorrect_type_msg(channel, data_type):\n logger.debug(\n f\"Attemptying to save raw fluorescence data for the channel '{channel}' \"\n f\"as '{data_type}' numbers.\\n Memory may be used inefficiently. \"\n f\"The data is converted from '{data_type}' to 'np.float32' before saving to file.\"\n )\n\n if \"det_sum\" in data and isinstance(data[\"det_sum\"], np.ndarray):\n if data[\"det_sum\"].dtype != np.float32:\n incorrect_type_msg(\"det_sum\", data[\"det_sum\"].dtype)\n data[\"det_sum\"] = data[\"det_sum\"].astype(np.float32, copy=False)\n sum_data = data[\"det_sum\"]\n sum_data_exists = True\n\n for detname in xrf_det_list:\n if detname in data and isinstance(data[detname], np.ndarray):\n if data[detname].dtype != np.float32:\n incorrect_type_msg(detname, data[detname].dtype)\n data[detname] = data[detname].astype(np.float32, copy=False)\n\n if not sum_data_exists: # Don't compute it if it already exists\n if sum_data is None:\n sum_data = np.copy(data[detname])\n else:\n sum_data += data[detname]\n\n file_open_mode = \"a\"\n if os.path.exists(fpath):\n if fname_add_version:\n # Creates unique file name\n fpath = _get_fpath_not_existing(fpath)\n else:\n if file_overwrite_existing:\n # Overwrite the existing file. 
This completely deletes the HDF5 file,\n # including all information (possibly processed results).\n file_open_mode = \"w\"\n else:\n raise IOError(f\"Function 'save_data_to_hdf5': File '{fpath}' already exists\")\n\n with h5py.File(fpath, file_open_mode) as f:\n\n # Create metadata group\n metadata_grp = f.create_group(f\"{interpath}/scan_metadata\")\n\n metadata_additional = {\n \"file_type\": \"XRF-MAP\",\n \"file_format\": \"NSLS2-XRF-MAP\",\n \"file_format_version\": \"1.0\",\n \"file_created_time\": ttime.strftime(\"%Y-%m-%dT%H:%M:%S+00:00\", ttime.localtime()),\n }\n\n metadata_software_version = {\n \"file_software\": \"PyXRF\",\n \"file_software_version\": pyxrf_version,\n }\n\n metadata_prepared = metadata or {}\n metadata_prepared.update(metadata_additional)\n if \"file_software\" not in metadata_prepared:\n metadata_prepared.update(metadata_software_version)\n\n if metadata_prepared:\n # We assume, that metadata does not contain repeated keys. Otherwise the\n # entry with the last occurrence of the key will override the previous ones.\n for key, value in metadata_prepared.items():\n metadata_grp.attrs[key] = value\n\n if create_each_det is True:\n for detname in xrf_det_list:\n new_data = data[detname]\n dataGrp = f.create_group(interpath + \"/\" + detname)\n ds_data = dataGrp.create_dataset(\"counts\", data=new_data, compression=\"gzip\")\n ds_data.attrs[\"comments\"] = \"Experimental data from {}\".format(detname)\n\n # summed data\n if sum_data is not None:\n dataGrp = f.create_group(interpath + \"/detsum\")\n ds_data = dataGrp.create_dataset(\"counts\", data=sum_data, compression=\"gzip\")\n ds_data.attrs[\"comments\"] = \"Experimental data from channel sum\"\n\n # add positions\n if \"pos_names\" in data:\n dataGrp = f.create_group(interpath + \"/positions\")\n pos_names = data[\"pos_names\"]\n pos_data = data[\"pos_data\"]\n dataGrp.create_dataset(\"name\", data=helper_encode_list(pos_names))\n dataGrp.create_dataset(\"pos\", data=pos_data)\n\n # scaler data\n if \"scaler_data\" in data:\n dataGrp = f.create_group(interpath + \"/scalers\")\n scaler_names = data[\"scaler_names\"]\n scaler_data = data[\"scaler_data\"]\n dataGrp.create_dataset(\"name\", data=helper_encode_list(scaler_names))\n dataGrp.create_dataset(\"val\", data=scaler_data)\n\n return fpath\n\n\nwrite_db_to_hdf_base = save_data_to_hdf5 # Backward compatibility\n\n\n'''\n# This may not be needed, since hdr always goes out of scope\ndef clear_handler_cache(hdr):\n \"\"\"\n Clear handler cache after loading data.\n\n Parameters\n ----------\n hdr\n reference to the handler\n \"\"\"\n if LooseVersion(databroker.__version__) >= LooseVersion('1.0.0'):\n hdr._data_source.fillers['yes']._handler_cache.clear()\n hdr._data_source.fillers['delayed']._handler_cache.clear()\n'''\n\n\n# TODO: the following function may be deleted after Databroker 0.13 is forgotten\ndef free_memory_from_handler():\n \"\"\"\n Quick way to set 3D dataset at handler to None to release memory.\n \"\"\"\n # The following check is redundant: Data Broker prior to version 1.0.0 always has '_handler_cache'.\n # In later versions of databroker the attribute may still be present if 'databroker.v0' is used.\n if (LooseVersion(databroker.__version__) < LooseVersion(\"1.0.0\")) or hasattr(db.fs, \"_handler_cache\"):\n for h in db.fs._handler_cache.values():\n setattr(h, \"_dataset\", None)\n print(\"Memory is released.\")\n\n\ndef export1d(runid, name=None):\n \"\"\"\n Export all PVs to a file. 
Do not talk to filestore.\n\n Parameters\n ----------\n name : str or optional\n name for the file\n runid : int\n run number\n \"\"\"\n t = db.get_table(db[runid], fill=False)\n if name is None:\n name = \"scan_\" + str(runid) + \".txt\"\n t.to_csv(name)\n\n\ndef helper_encode_list(data, data_type=\"utf-8\"):\n return [d.encode(data_type) for d in data]\n\n\ndef helper_decode_list(data, data_type=\"utf-8\"):\n return [d.decode(data_type) for d in data]\n\n\ndef get_data_per_event(n, data, e, det_num):\n db.fill_event(e)\n min_len = e.data[\"fluor\"].shape[0]\n for i in range(det_num):\n data[n, :min_len, :] += e.data[\"fluor\"][:, i, :]\n\n\ndef get_data_parallel(data, elist, det_num):\n num_processors_to_use = multiprocessing.cpu_count() - 2\n\n print(\"cpu count: {}\".format(num_processors_to_use))\n pool = multiprocessing.Pool(num_processors_to_use)\n\n # result_pool = [\n # pool.apply_async(get_data_per_event, (n, data, e, det_num))\n # for n, e in enumerate(elist)]\n\n # results = [r.get() for r in result_pool]\n\n pool.terminate()\n pool.join()\n"
] |
[
[
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"numpy.hstack",
"numpy.swapaxes",
"numpy.fliplr",
"numpy.arange",
"numpy.reshape",
"numpy.sin",
"numpy.copy",
"numpy.diff",
"numpy.zeros",
"numpy.append",
"numpy.transpose",
"numpy.array",
"numpy.flip",
"numpy.sum",
"numpy.meshgrid",
"numpy.abs",
"numpy.cos",
"numpy.empty",
"numpy.ones",
"numpy.vstack"
]
] |
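The code cell above ends with the HDF5 writer that lays fluorescence data out under `xrfmap/...` groups. As a minimal sketch of that layout (not part of the dataset row), the snippet below writes a synthetic summed-detector map with h5py; the file name, array shape, and metadata values are assumptions chosen for illustration.

```python
import numpy as np
import h5py

# Assumed synthetic "summed detector" map: (rows, cols, spectrum bins),
# converted to float32 as the saver above requires.
det_sum = np.random.poisson(5.0, size=(10, 12, 4096)).astype(np.float32)

with h5py.File("demo_xrf_map.h5", "w") as f:
    # Metadata attributes live on "xrfmap/scan_metadata", mirroring the listing.
    meta = f.create_group("xrfmap/scan_metadata")
    meta.attrs["file_type"] = "XRF-MAP"
    meta.attrs["file_format"] = "NSLS2-XRF-MAP"

    # Summed fluorescence goes to "xrfmap/detsum/counts" with gzip compression.
    grp = f.create_group("xrfmap/detsum")
    ds = grp.create_dataset("counts", data=det_sum, compression="gzip")
    ds.attrs["comments"] = "Experimental data from channel sum"
```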
AlexanderIvanovQC/sklearn-onnx
|
[
"0afbe295aa3f1abbcea60f582faac31d16bd3ab0"
] |
[
"tests/test_utils/utils_backend.py"
] |
[
"# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nHelpers to test runtimes.\n\"\"\"\nimport os\nimport sys\nimport glob\nimport pickle\nfrom distutils.version import StrictVersion # noqa\nimport numpy\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nimport onnx\nimport onnxruntime\n\n\nclass ExpectedAssertionError(Exception):\n \"\"\"\n Expected failure.\n \"\"\"\n pass\n\n\nclass OnnxRuntimeAssertionError(AssertionError):\n \"\"\"\n Expected failure.\n \"\"\"\n def __init__(self, msg):\n new_msg = \"{}\\nonnx=={} onnxruntime=={}\".format(\n msg, onnx.__version__, onnxruntime.__version__)\n AssertionError.__init__(self, new_msg)\n\n\nclass OnnxRuntimeMissingNewOnnxOperatorException(OnnxRuntimeAssertionError):\n \"\"\"\n Raised when onnxruntime does not implement a new operator\n defined in the latest onnx.\n \"\"\"\n pass\n\n\ndef evaluate_condition(backend, condition):\n \"\"\"\n Evaluates a condition such as\n ``StrictVersion(onnxruntime.__version__) <= StrictVersion('0.1.3')``\n \"\"\"\n if backend == \"onnxruntime\":\n import onnxruntime # noqa\n import onnx # noqa\n return eval(condition)\n else:\n raise NotImplementedError(\n \"Not implemented for backend '{0}' and \"\n \"condition '{1}'.\".format(backend, condition))\n\n\ndef is_backend_enabled(backend):\n \"\"\"\n Tells if a backend is enabled.\n Raises an exception if backend != 'onnxruntime'.\n Unit tests only test models against this backend.\n \"\"\"\n if backend == \"onnxruntime\":\n try:\n import onnxruntime # noqa\n return True\n except ImportError:\n return False\n else:\n raise NotImplementedError(\n \"Not implemented for backend '{0}'\".format(backend))\n\n\ndef compare_backend(backend,\n test,\n decimal=5,\n options=None,\n verbose=False,\n context=None,\n comparable_outputs=None,\n intermediate_steps=False,\n classes=None,\n disable_optimisation=False):\n \"\"\"\n The function compares the expected output (computed with\n the model before being converted to ONNX) and the ONNX output\n produced with module *onnxruntime*.\n\n :param backend: backend to use to run the comparison,\n only *onnxruntime* is currently supported\n :param test: dictionary with the following keys:\n - *onnx*: onnx model (filename or object)\n - *expected*: expected output (filename pkl or object)\n - *data*: input data (filename pkl or object)\n :param decimal: precision of the comparison\n :param options: comparison options\n :param context: specifies custom operators\n :param comparable_outputs: compare only these outputs\n :param verbose: in case of error, the function may print\n more information on the standard output\n :param intermediate_steps: displays intermediate steps\n in case of an error\n :param classes: classes names (if option 'nocl' is used)\n :param disable_optimisation: disable optimisation onnxruntime\n could do\n\n The function does not return anything but raises an error\n if the comparison failed.\n :return: tuple (output, lambda function to call onnx predictions)\n \"\"\"\n if backend == \"onnxruntime\":\n if sys.version_info[0] == 2:\n # onnxruntime is not available on Python 2.\n return\n from .utils_backend_onnxruntime import compare_runtime\n return compare_runtime(test,\n decimal,\n options=options,\n verbose=verbose,\n comparable_outputs=comparable_outputs,\n intermediate_steps=intermediate_steps,\n classes=classes,\n disable_optimisation=disable_optimisation)\n else:\n raise ValueError(\"Does not support backend '{0}'.\".format(backend))\n\n\ndef search_converted_models(root=None):\n \"\"\"\n 
Searches for all converted models generated by\n unit tests in folders *tests_dump* and with function\n *dump_data_and_model*.\n \"\"\"\n if root is None:\n root = os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"..\", \"__dump_data\"))\n root = os.path.normpath(root)\n if not os.path.exists(root):\n raise FileNotFoundError(\"Unable to find '{0}'.\".format(root))\n\n founds = glob.iglob(\"{0}/**/*.model.onnx\".format(root), recursive=True)\n keep = []\n for found in founds:\n onnx = found\n basename = onnx[:-len(\".model.onnx\")]\n data = basename + \".data.pkl\"\n expected = basename + \".expected.pkl\"\n res = dict(onnx=onnx, data=data, expected=expected)\n ok = True\n for k, v in res.items():\n if not os.path.exists(v):\n ok = False\n if ok:\n models = [basename + \".model.pkl\", basename + \".model.keras\"]\n for model in models:\n if os.path.exists(model):\n res['model'] = model\n break\n if 'model' in res:\n keep.append((basename, res))\n keep.sort()\n return [_[1] for _ in keep]\n\n\ndef load_data_and_model(items_as_dict, **context):\n \"\"\"\n Loads every file in a dictionary {key: filename}.\n The extension is either *pkl* and *onnx* and determines\n how it it loaded. If the value is not a string,\n the function assumes it was already loaded.\n \"\"\"\n res = {}\n for k, v in items_as_dict.items():\n if isinstance(v, str):\n if os.path.splitext(v)[-1] == \".pkl\":\n with open(v, \"rb\") as f:\n try:\n bin = pickle.load(f)\n except ImportError as e:\n if '.model.' in v:\n continue\n else:\n raise ImportError(\n \"Unable to load '{0}' due to {1}\".format(v, e))\n res[k] = bin\n elif os.path.splitext(v)[-1] == \".keras\":\n import keras.models\n res[k] = keras.models.load_model(v, custom_objects=context)\n else:\n res[k] = v\n else:\n res[k] = v\n return res\n\n\ndef extract_options(name):\n \"\"\"\n Extracts comparison option from filename.\n As example, ``Binarizer-SkipDim1`` means\n options *SkipDim1* is enabled.\n ``(1, 2)`` and ``(2,)`` are considered equal.\n Available options: see :func:`dump_data_and_model`.\n \"\"\"\n opts = name.replace(\"\\\\\", \"/\").split(\"/\")[-1].split('.')[0].split('-')\n if len(opts) == 1:\n return {}\n else:\n res = {}\n for opt in opts[1:]:\n if opt in (\"SkipDim1\", \"OneOff\", \"NoProb\", \"NoProbOpp\",\n \"Dec4\", \"Dec3\", \"Dec2\", \"Dec1\", 'Svm',\n 'Out0', 'Reshape', 'SklCol', 'DF', 'OneOffArray',\n 'Out1'):\n res[opt] = True\n else:\n raise NameError(\"Unable to parse option '{}'\".format(opts[1:]))\n return res\n\n\ndef compare_outputs(expected, output, verbose=False, **kwargs):\n \"\"\"\n Compares expected values and output.\n Returns None if no error, an exception message otherwise.\n \"\"\"\n SkipDim1 = kwargs.pop(\"SkipDim1\", False)\n NoProb = kwargs.pop(\"NoProb\", False)\n NoProbOpp = kwargs.pop(\"NoProbOpp\", False)\n Dec4 = kwargs.pop(\"Dec4\", False)\n Dec3 = kwargs.pop(\"Dec3\", False)\n Dec2 = kwargs.pop(\"Dec2\", False)\n Dec1 = kwargs.pop(\"Dec1\", False)\n Disc = kwargs.pop(\"Disc\", False)\n Mism = kwargs.pop(\"Mism\", False)\n\n if Dec4:\n kwargs[\"decimal\"] = min(kwargs[\"decimal\"], 4)\n if Dec3:\n kwargs[\"decimal\"] = min(kwargs[\"decimal\"], 3)\n if Dec2:\n kwargs[\"decimal\"] = min(kwargs[\"decimal\"], 2)\n if Dec1:\n kwargs[\"decimal\"] = min(kwargs[\"decimal\"], 1)\n if isinstance(expected, numpy.ndarray) and isinstance(\n output, numpy.ndarray):\n if SkipDim1:\n # Arrays like (2, 1, 2, 3) becomes (2, 2, 3)\n # as one dimension is useless.\n expected = expected.reshape(\n tuple([d for d in 
expected.shape if d > 1]))\n output = output.reshape(tuple([d for d in expected.shape\n if d > 1]))\n if NoProb or NoProbOpp:\n # One vector is (N,) with scores, negative for class 0\n # positive for class 1\n # The other vector is (N, 2) score in two columns.\n if len(output.shape) == 2 and output.shape[1] == 2 and len(\n expected.shape) == 1:\n output = output[:, 1]\n if NoProbOpp:\n output = -output\n elif len(output.shape) == 1 and len(expected.shape) == 1:\n pass\n elif len(expected.shape) == 1 and len(output.shape) == 2 and \\\n expected.shape[0] == output.shape[0] and \\\n output.shape[1] == 1:\n output = output[:, 0]\n if NoProbOpp:\n output = -output\n elif expected.shape != output.shape:\n raise NotImplementedError(\"Shape mismatch: {0} != {1}\".format(\n expected.shape, output.shape))\n if len(expected.shape) == 1 and len(\n output.shape) == 2 and output.shape[1] == 1:\n output = output.ravel()\n if len(output.shape) == 3 and output.shape[0] == 1 and len(\n expected.shape) == 2:\n output = output.reshape(output.shape[1:])\n if expected.dtype in (numpy.str, numpy.dtype(\"<U1\"),\n numpy.dtype(\"<U3\")):\n try:\n assert_array_equal(expected, output, verbose=verbose)\n except Exception as e:\n if Disc:\n # Bug to be fixed later.\n return ExpectedAssertionError(str(e))\n else:\n return OnnxRuntimeAssertionError(str(e))\n else:\n try:\n assert_array_almost_equal(expected,\n output,\n verbose=verbose,\n **kwargs)\n except Exception as e:\n longer = \"\\n--EXPECTED--\\n{0}\\n--OUTPUT--\\n{1}\".format(\n expected, output) if verbose else \"\"\n expected_ = numpy.asarray(expected).ravel()\n output_ = numpy.asarray(output).ravel()\n if len(expected_) == len(output_):\n if numpy.issubdtype(expected_.dtype, numpy.floating):\n diff = numpy.abs(expected_ - output_).max()\n else:\n diff = max((1 if ci != cj else 0)\n for ci, cj in zip(expected_, output_))\n if diff == 0:\n return None\n elif Mism:\n return ExpectedAssertionError(\n \"dimension mismatch={0}, {1}\\n{2}{3}\".format(\n expected.shape, output.shape, e, longer))\n else:\n return OnnxRuntimeAssertionError(\n \"dimension mismatch={0}, {1}\\n{2}{3}\".format(\n expected.shape, output.shape, e, longer))\n if Disc:\n # Bug to be fixed later.\n return ExpectedAssertionError(\n \"max-diff={0}\\n--expected--output--\\n{1}{2}\".format(\n diff, e, longer))\n else:\n return OnnxRuntimeAssertionError(\n \"max-diff={0}\\n--expected--output--\\n{1}{2}\".format(\n diff, e, longer))\n else:\n return OnnxRuntimeAssertionError(\"Unexpected types {0} != {1}\".format(\n type(expected), type(output)))\n return None\n"
] |
[
[
"numpy.abs",
"numpy.asarray",
"numpy.issubdtype",
"numpy.dtype",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_array_almost_equal"
]
] |
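The `utils_backend.py` cell above parses comparison options such as `Dec3` from dump file names and then falls back on `numpy.testing` for the actual check. A small illustrative sketch of that tolerance behaviour follows; the arrays and the drift value are hypothetical, not taken from the test suite.

```python
import numpy as np
from numpy.testing import assert_array_almost_equal

# Hypothetical expected/produced pair with a small numerical drift.
expected = np.array([0.12345, 0.67890], dtype=np.float32)
produced = expected + 4e-4

# A "-Dec3" suffix in the dump file name caps the precision at 3 decimals.
assert_array_almost_equal(expected, produced, decimal=3)      # drift tolerated

try:
    assert_array_almost_equal(expected, produced, decimal=5)  # compare_backend default
except AssertionError:
    print("drift exceeds the stricter decimal=5 tolerance")
```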
bmazoure/dreamerv2_jax
|
[
"f7a1b6233280f756df524e6b70455b8607d30c44"
] |
[
"dreamerv2/common/batched_buffer.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Dopamine Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The standard DQN replay memory.\nThis implementation is an out-of-graph replay memory + in-graph wrapper. It\nsupports vanilla n-step updates of the form typically found in the literature,\ni.e. where rewards are accumulated for n steps and the intermediate trajectory\nis not exposed to the agent. This does not allow, for example, performing\noff-policy corrections.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport gzip\nimport math\nimport os\nimport pickle\nimport time\n\nfrom absl import logging\nimport numpy as np\n\nimport jax\nfrom jax import numpy as jnp\nimport functools\n\nfrom . import deterministic_sum_tree as sum_tree\n\n# Defines a type describing part of the tuple returned by the replay\n# memory. Each element of the tuple is a tensor of shape [batch, ...] where\n# ... is defined the 'shape' field of ReplayElement. The tensor type is\n# given by the 'type' field. The 'name' field is for convenience and ease of\n# debugging.\nReplayElement = (\n collections.namedtuple('shape_type', ['name', 'shape', 'type']))\n\n# A prefix that can not collide with variable names for checkpoint files.\nSTORE_FILENAME_PREFIX = '$store$_'\n\n# This constant determines how many iterations a checkpoint is kept for.\nCHECKPOINT_DURATION = 4\n\n\ndef modulo_range(start, length, modulo):\n for i in range(length):\n yield (start + i) % modulo\n\n\ndef invalid_range(cursor, replay_capacity, stack_size, update_horizon):\n \"\"\"Returns a array with the indices of cursor-related invalid transitions.\n There are update_horizon + stack_size invalid indices:\n - The update_horizon indices before the cursor, because we do not have a\n valid N-step transition (including the next state).\n - The stack_size indices on or immediately after the cursor.\n If N = update_horizon, K = stack_size, and the cursor is at c, invalid\n indices are:\n c - N, c - N + 1, ..., c, c + 1, ..., c + K - 1.\n It handles special cases in a circular buffer in the beginning and the end.\n Args:\n cursor: int, the position of the cursor.\n replay_capacity: int, the size of the replay memory.\n stack_size: int, the size of the stacks returned by the replay memory.\n update_horizon: int, the agent's update horizon.\n Returns:\n np.array of size stack_size with the invalid indices.\n \"\"\"\n assert cursor < replay_capacity\n return np.array(\n [(cursor - update_horizon + i) % replay_capacity\n for i in range(stack_size + update_horizon)])\n\nclass JaxSubsequenceParallelEnvReplayBuffer(object):\n \"\"\"A simple out-of-graph Replay Buffer.\n Stores transitions, state, action, reward, next_state, terminal (and any\n extra contents specified) in a circular buffer and provides a uniform\n transition sampling function.\n When the states consist of stacks of observations storing the states is\n inefficient. 
This class writes observations and constructs the stacked states\n at sample time.\n Attributes:\n add_count: int, counter of how many transitions have been added (including\n the blank ones at the beginning of an episode).\n invalid_range: np.array, an array with the indices of cursor-related invalid\n transitions\n \"\"\"\n\n def __init__(self,\n observation_shape,\n stack_size,\n replay_capacity,\n batch_size,\n subseq_len,\n seed=0,\n n_envs=1,\n update_horizon=1,\n gamma=0.99,\n max_sample_attempts=1000,\n extra_storage_types=None,\n persistent=True,\n observation_dtype=np.uint8,\n terminal_dtype=np.uint8,\n action_shape=(),\n action_dtype=np.int32,\n reward_shape=(),\n reward_dtype=np.float32):\n \"\"\"Initializes OutOfGraphReplayBuffer.\n Args:\n observation_shape: tuple of ints.\n stack_size: int, number of frames to use in state stack.\n replay_capacity: int, number of transitions to keep in memory.\n batch_size: int.\n update_horizon: int, length of update ('n' in n-step update).\n gamma: int, the discount factor.\n max_sample_attempts: int, the maximum number of attempts allowed to\n get a sample.\n extra_storage_types: list of ReplayElements defining the type of the extra\n contents that will be stored and returned by sample_transition_batch.\n observation_dtype: np.dtype, type of the observations. Defaults to\n np.uint8 for Atari 2600.\n terminal_dtype: np.dtype, type of the terminals. Defaults to np.uint8 for\n Atari 2600.\n action_shape: tuple of ints, the shape for the action vector. Empty tuple\n means the action is a scalar.\n action_dtype: np.dtype, type of elements in the action.\n reward_shape: tuple of ints, the shape of the reward vector. Empty tuple\n means the reward is a scalar.\n reward_dtype: np.dtype, type of elements in the reward.\n Raises:\n ValueError: If replay_capacity is too small to hold at least one\n transition.\n \"\"\"\n assert isinstance(observation_shape, tuple)\n if replay_capacity < update_horizon + stack_size:\n raise ValueError('There is not enough capacity to cover '\n 'update_horizon and stack_size.')\n\n logging.info(\n 'Creating a %s replay memory with the following parameters:',\n self.__class__.__name__)\n logging.info('\\t observation_shape: %s', str(observation_shape))\n logging.info('\\t observation_dtype: %s', str(observation_dtype))\n logging.info('\\t terminal_dtype: %s', str(terminal_dtype))\n logging.info('\\t stack_size: %d', stack_size)\n logging.info('\\t replay_capacity: %d', replay_capacity)\n logging.info('\\t batch_size: %d', batch_size)\n logging.info('\\t update_horizon: %d', update_horizon)\n logging.info('\\t gamma: %f', gamma)\n\n self._action_shape = action_shape\n self._action_dtype = action_dtype\n self._reward_shape = reward_shape\n self._reward_dtype = reward_dtype\n self._observation_shape = observation_shape\n self._stack_size = stack_size\n self._state_shape = self._observation_shape + (self._stack_size,)\n self._batch_size = batch_size\n self._update_horizon = update_horizon\n self._gamma = gamma\n self._observation_dtype = observation_dtype\n self._terminal_dtype = terminal_dtype\n self._max_sample_attempts = max_sample_attempts\n self._subseq_len = subseq_len\n self._rng = jax.random.PRNGKey(seed)\n\n self.persistent = persistent\n self.old_indices = None\n\n self.n_envs = n_envs\n self._replay_length = int(replay_capacity // self.n_envs)\n\n # Gotta round this down, since the matrix is rectangular.\n self._replay_capacity = self._replay_length * self.n_envs\n\n self.total_steps = 0\n\n if 
extra_storage_types:\n self._extra_storage_types = extra_storage_types\n else:\n self._extra_storage_types = []\n self._create_storage()\n self.add_count = np.array(0)\n self.invalid_range = np.zeros((self._stack_size))\n # When the horizon is > 1, we compute the sum of discounted rewards as a dot\n # product using the precomputed vector <gamma^0, gamma^1, ..., gamma^{n-1}>.\n self._cumulative_discount_vector = np.array(\n [math.pow(self._gamma, n) for n in range(update_horizon)],\n dtype=np.float32)\n self._next_experience_is_episode_start = True\n self._episode_end_indices = set()\n self._episode_start_indices = set([(0, i) for i in range(n_envs)])\n\n def _create_storage(self):\n \"\"\"Creates the numpy arrays used to store transitions.\n \"\"\"\n self._store = {}\n for storage_element in self.get_storage_signature():\n array_shape = [self._replay_length, self.n_envs] + list(storage_element.shape)\n self._store[storage_element.name] = np.empty(\n array_shape, dtype=storage_element.type)\n\n def get_add_args_signature(self):\n \"\"\"The signature of the add function.\n Note - Derived classes may return a different signature.\n Returns:\n list of ReplayElements defining the type of the argument signature needed\n by the add function.\n \"\"\"\n return self.get_storage_signature()\n\n def get_storage_signature(self):\n \"\"\"Returns a default list of elements to be stored in this replay memory.\n Note - Derived classes may return a different signature.\n Returns:\n list of ReplayElements defining the type of the contents stored.\n \"\"\"\n storage_elements = [\n ReplayElement('observation', self._observation_shape,\n self._observation_dtype),\n ReplayElement('action', self._action_shape, self._action_dtype),\n ReplayElement('reward', self._reward_shape, self._reward_dtype),\n ReplayElement('terminal', (), self._terminal_dtype)\n ]\n\n for extra_replay_element in self._extra_storage_types:\n storage_elements.append(extra_replay_element)\n return storage_elements\n\n def _add_zero_transition(self):\n \"\"\"Adds a padding transition filled with zeros (Used in episode beginnings).\n \"\"\"\n zero_transition = []\n for element_type in self.get_add_args_signature():\n zero_transition.append(\n np.zeros(element_type.shape, dtype=element_type.type))\n self._episode_end_indices.discard(self.cursor()) # If present\n self._add(*zero_transition)\n\n def add(self,\n observation,\n action,\n reward,\n terminal,\n *args,\n priority=None,\n episode_end=False):\n \"\"\"Adds a transition to the replay memory.\n This function checks the types and handles the padding at the beginning of\n an episode. Then it calls the _add function.\n Since the next_observation in the transition will be the observation added\n next there is no need to pass it.\n If the replay memory is at capacity the oldest transition will be discarded.\n Args:\n observation: np.array with shape observation_shape.\n action: int, the action in the transition.\n reward: float, the reward received in the transition.\n terminal: np.dtype, acts as a boolean indicating whether the transition\n was terminal (1) or not (0).\n *args: extra contents with shapes and dtypes according to\n extra_storage_types.\n priority: float, unused in the circular replay buffer, but may be used\n in child classes like PrioritizedReplayBuffer.\n episode_end: bool, whether this experience is the last experience in\n the episode. This is useful for tasks that terminate due to time-out,\n but do not end on a terminal state. 
Overloading 'terminal' may not\n be sufficient in this case, since 'terminal' is passed to the agent\n for training. 'episode_end' allows the replay buffer to determine\n episode boundaries without passing that information to the agent.\n \"\"\"\n if priority is not None:\n args = args + (priority,)\n\n self.total_steps += self.n_envs\n\n observation = observation[..., -self._observation_shape[-1]:]\n self._check_add_types(observation, action, reward, terminal, *args)\n\n resets = episode_end + terminal\n for i in range(resets.shape[0]):\n if resets[i]:\n self._episode_end_indices.add((self.cursor(), i))\n self._episode_start_indices.add((self.cursor()+1, i))\n # self._next_experience_is_episode_start = True\n else:\n self._episode_end_indices.discard((self.cursor(), i)) # If present\n self._episode_start_indices.discard((self.cursor()+1, i))\n\n self._add(observation, action, reward, terminal, *args)\n\n def _add(self, *args):\n \"\"\"Internal add method to add to the storage arrays.\n Args:\n *args: All the elements in a transition.\n \"\"\"\n self._check_args_length(*args)\n transition = {e.name: args[idx]\n for idx, e in enumerate(self.get_add_args_signature())}\n self._add_transition(transition)\n\n def _add_transition(self, transition):\n \"\"\"Internal add method to add transition dictionary to storage arrays.\n Args:\n transition: The dictionary of names and values of the transition\n to add to the storage.\n Each tensor should have leading dim equal to the number\n of environments used by the buffer.\n \"\"\"\n cursor = self.cursor()\n for arg_name in transition:\n self._store[arg_name][cursor] = transition[arg_name]\n\n self.add_count += 1\n self.invalid_range = invalid_range(\n self.cursor(), self._replay_length, self._stack_size,\n self._update_horizon)\n\n def _check_args_length(self, *args):\n \"\"\"Check if args passed to the add method have the same length as storage.\n Args:\n *args: Args for elements used in storage.\n Raises:\n ValueError: If args have wrong length.\n \"\"\"\n if len(args) != len(self.get_add_args_signature()):\n raise ValueError('Add expects {} elements, received {}'.format(\n len(self.get_add_args_signature()), len(args)))\n\n def _check_add_types(self, *args):\n \"\"\"Checks if args passed to the add method match those of the storage.\n Args:\n *args: Args whose types need to be validated.\n Raises:\n ValueError: If args have wrong shape or dtype.\n \"\"\"\n self._check_args_length(*args)\n for i, (arg_element, store_element) in enumerate(zip(args, self.get_add_args_signature())):\n if isinstance(arg_element, np.ndarray):\n arg_shape = arg_element.shape\n elif isinstance(arg_element, tuple) or isinstance(arg_element, list):\n # TODO(b/80536437). 
This is not efficient when arg_element is a list.\n arg_shape = np.array(arg_element).shape\n else:\n # Assume it is scalar.\n arg_shape = tuple()\n store_element_shape = tuple(store_element.shape)\n assert arg_shape[0] == self.n_envs\n arg_shape = arg_shape[1:]\n if arg_shape != store_element_shape:\n raise ValueError('arg {} has shape {}, expected {}'.format(i,\n arg_shape, store_element_shape))\n\n def is_empty(self):\n \"\"\"Is the Replay Buffer empty?\"\"\"\n return self.add_count == 0\n\n def is_full(self):\n \"\"\"Is the Replay Buffer full?\"\"\"\n return self.add_count >= self._replay_length\n\n def ravel_indices(self, indices_t, indices_b):\n return np.ravel_multi_index((indices_t, indices_b),\n (self._replay_length, self.n_envs),\n mode=\"wrap\")\n\n def unravel_indices(self, indices):\n return np.unravel_index(indices, (self._replay_length, self.n_envs))\n\n def get_from_store(self, element_name, indices_t, indices_b):\n array = self._store[element_name]\n return array[indices_t, indices_b]\n\n def cursor(self):\n \"\"\"Index to the location where the next transition will be written.\"\"\"\n return self.add_count % self._replay_length\n\n def parallel_get_stack(self, element_name, indices_t, indices_b, first_valid):\n indices_t = np.arange(-self._stack_size + 1, 1)[:, None] + indices_t[None, :]\n indices_b = indices_b[None, :].repeat(self._stack_size, axis=0)\n mask = indices_t >= first_valid\n result = self.get_from_store(element_name, indices_t % self._replay_length, indices_b)\n mask = mask.reshape(*mask.shape, *([1]*(len(result.shape)-2)))\n result = result*mask\n result = np.moveaxis(result, 0, -1)\n return result\n\n def get_terminal_stack(self, index_t, index_b):\n return self.parallel_get_stack(\"terminal\", index_t, index_b, 0)\n\n def is_valid_transition(self, index_t, index_b):\n \"\"\"Checks if the index contains a valid transition.\n Checks for collisions with the end of episodes and the current position\n of the cursor.\n Args:\n index: int, the index to the state in the transition.\n Returns:\n Is the index valid: Boolean.\n Start of the current episode (if within our stack size): Integer.\n \"\"\"\n # Check the index is in the valid range\n if index_t < 0 or index_t >= self._replay_length:\n return False\n if not self.is_full():\n # The indices and next_indices must be smaller than the cursor.\n if index_t >= self.cursor() - self._update_horizon - self._subseq_len:\n return False\n # The first few indices contain the padding states of the first episode.\n if index_t < self._stack_size - 1:\n return False\n\n # Skip transitions that straddle the cursor.\n if index_t in set(self.invalid_range):\n return False\n\n # If the episode ends before the update horizon, without a terminal signal,\n # it is invalid.\n for i in modulo_range(index_t, self._update_horizon, self._replay_length):\n if (i, index_b) in self._episode_end_indices and not self._store['terminal'][i, index_b]:\n return False\n\n return True\n\n def is_ep_start(self, index_t, index_b):\n \"\"\"Check whether or not a transition is the start of a new episode.\n\n Params:\n index_t: time index of transition\n index_b: batch index of transition\n Returns: 0 if (index_t, index_b) is not the start of an episode. 
Else,\n returns the first step of the new episode.\n \"\"\"\n # If there are terminal flags in any frame other than the last one\n # the stack is not valid, so don't sample it.\n terminals = self.get_terminal_stack(index_t, index_b)[0, :-1]\n if terminals.any():\n # Suppose ind_t = 100, stack_size = 4, and step 98 has a terminal.\n # Then step 99 is the first of the new episode.\n # terminals.argmax()==1. 100 - 4 + 1 + 2 == 99, so this is correct\n ep_start = index_t - self._stack_size + terminals.argmax() + 2\n else:\n ep_start = 0\n return ep_start\n\n def _create_batch_arrays(self, batch_size):\n \"\"\"Create a tuple of arrays with the type of get_transition_elements.\n When using the WrappedReplayBuffer with staging enabled it is important to\n create new arrays every sample because StaginArea keeps a pointer to the\n returned arrays.\n Args:\n batch_size: (int) number of transitions returned. If None the default\n batch_size will be used.\n Returns:\n Tuple of np.arrays with the shape and type of get_transition_elements.\n \"\"\"\n transition_elements = self.get_transition_elements(batch_size)\n batch_arrays = []\n for element in transition_elements:\n batch_arrays.append(np.zeros(element.shape, dtype=element.type))\n return tuple(batch_arrays)\n\n def sample_ep_start(self):\n for _ in range(self._max_sample_attempts):\n self._rng, rng = jax.random.split(self._rng)\n i = jax.random.choice(rng, len(self._episode_start_indices))\n t, b = list(self._episode_start_indices)[i]\n if self.is_valid_transition(t, b):\n return t, b\n return t, b\n\n def sample_index_batch_persistent(self, old_indices):\n t_indices, b_indices = old_indices # (2, batch_size)\n t_indices = np.array(t_indices)\n b_indices = np.array(b_indices)\n self._rng, rng = jax.random.split(self._rng)\n\n # In case one of the initial indices we were given was invalid, check\n # each and censor those not allowed.\n # Note that this will happen every time we start reading an episode\n # in progress, so is not rare.\n needs_resample = [i for i in range(len(t_indices)) if not\n self.is_valid_transition(t_indices[i].item(),\n b_indices[i].item())]\n for i in needs_resample:\n t_indices[i], b_indices[i] = self.sample_ep_start()\n\n # Broadcast the batch indices and extend the time indices\n b_indices = b_indices[:, None].repeat(self._subseq_len, axis=1)\n state_indices = (t_indices[:, None] + np.arange(self._subseq_len)[None, :])\n\n # Check if any indices are episode starts\n censor_before = [self.is_ep_start(t_indices[i:i+1], b_indices[i:i+1])\n for i in range(len(t_indices))]\n censor_before = np.array(censor_before)\n censor_before = censor_before[:, None].repeat(self._subseq_len, axis=1)\n\n is_last = self.get_from_store(\"is_last\", state_indices, b_indices)\n for i, j in zip(*is_last.nonzero()):\n t, b = self.sample_ep_start()\n b_indices[i, j:] = b\n state_indices[i, j:] = np.arange(self._subseq_len - j) + t\n censor_before[i, j:] = t\n\n state_indices = state_indices.reshape(self._batch_size*self._subseq_len)\n b_indices = b_indices.reshape(self._batch_size*self._subseq_len)\n return state_indices, \\\n b_indices, \\\n np.zeros_like(state_indices)\n\n def sample_index_batch(self, batch_size, subseq_len=None):\n \"\"\"Returns a batch of valid indices sampled uniformly.\n\n Args:\n batch_size: int, number of indices returned.\n\n Returns:\n list of ints, a batch of valid indices sampled uniformly.\n\n Raises:\n RuntimeError: If the batch was not constructed after maximum number of\n tries.\n \"\"\"\n subseq_len = subseq_len 
or self._subseq_len\n self._rng, rng = jax.random.split(self._rng)\n if self.is_full():\n # add_count >= self._replay_capacity > self._stack_size\n min_id = self.cursor() - self._replay_length + self._stack_size - 1\n max_id = self.cursor() - self._update_horizon - self._subseq_len\n else:\n # add_count < self._replay_capacity\n min_id = self._stack_size - 1\n max_id = self.cursor() - self._update_horizon - self._subseq_len\n if max_id <= min_id:\n raise RuntimeError('Cannot sample a batch with fewer than stack size '\n '({}) + update_horizon ({}) transitions.'.\n format(self._stack_size, self._update_horizon))\n t_indices = np.array(jax.random.randint(rng, (batch_size,), min_id, max_id) % self._replay_length)\n b_indices = np.array(jax.random.randint(rng, (batch_size,), 0, self.n_envs))\n allowed_attempts = self._max_sample_attempts\n t_indices = np.array(t_indices)\n censor_before = np.zeros_like(t_indices)\n for i in range(len(t_indices)):\n is_valid = self.is_valid_transition(t_indices[i].item(), b_indices[i].item())\n ep_start = self.is_ep_start(t_indices[i:i+1], b_indices[i:i+1])\n censor_before[i] = ep_start\n if not is_valid:\n if allowed_attempts == 0:\n raise RuntimeError(\n 'Max sample attempts: Tried {} times but only sampled {}'\n ' valid indices. Batch size is {}'.\n format(self._max_sample_attempts, i, batch_size))\n while not is_valid\\\n and allowed_attempts > 0:\n # If index i is not valid keep sampling others. Note that this\n # is not stratified.\n self._rng, rng = jax.random.split(self._rng)\n t_index = jax.random.randint(rng, (1,), min_id, max_id) % self._replay_length\n b_index = jax.random.randint(rng, (1,), 0, self.n_envs)\n allowed_attempts -= 1\n t_indices[i] = t_index\n b_indices[i] = b_index\n is_valid = self.is_valid_transition(t_indices[i].item(), b_indices[i].item())\n ep_start = self.is_ep_start(t_indices[i:i+1], b_indices[i:i+1])\n censor_before[i] = ep_start\n b_indices = b_indices[:, None].repeat(subseq_len, axis=1).reshape(batch_size*subseq_len)\n state_indices = (t_indices[:, None] + np.arange(subseq_len)[None, :]).reshape(batch_size * subseq_len)\n censor_before = censor_before[:, None].repeat(subseq_len, axis=1).reshape(batch_size*subseq_len)\n return state_indices, b_indices, censor_before\n\n def restore_leading_dims(self, batch_size, jumps, tensor):\n return tensor.reshape(batch_size, jumps, *tensor.shape[1:])\n\n def sample(self, rng=None, *args, **kwargs):\n return self.sample_transition_batch(rng, *args, **kwargs)\n\n def sample_transition_batch(self, rng=None, batch_size=None, indices=None, jumps=None):\n \"\"\"Returns a batch of transitions (including any extra contents).\n If get_transition_elements has been overridden and defines elements not\n stored in self._store, an empty array will be returned and it will be\n left to the child class to fill it. For example, for the child class\n OutOfGraphPrioritizedReplayBuffer, the contents of the\n sampling_probabilities are stored separately in a sum tree.\n When the transition is terminal next_state_batch has undefined contents.\n NOTE: This transition contains the indices of the sampled elements. These\n are only valid during the call to sample_transition_batch, i.e. they may\n be used by subclasses of this replay buffer but may point to different data\n as soon as sampling is done.\n Args:\n batch_size: int, number of transitions returned. If None, the default\n batch_size will be used.\n indices: None or list of ints, the indices of every transition in the\n batch. 
If None, sample the indices uniformly.\n Returns:\n transition_batch: tuple of np.arrays with the shape and type as in\n get_transition_elements().\n Raises:\n ValueError: If an element to be sampled is missing from the replay buffer.\n \"\"\"\n self._rng = rng if rng is not None else self._rng\n if batch_size is None:\n batch_size = self._batch_size\n if jumps is None:\n jumps = self._subseq_len\n if indices is None:\n if (not self.persistent or self.old_indices is None\n or len(self.old_indices) != batch_size):\n state_indices, b_indices, censor_before = self.sample_index_batch(batch_size, jumps)\n else:\n state_indices, b_indices, censor_before = self.sample_index_batch_persistent(self.old_indices)\n else:\n t_indices, b_indices = indices\n b_indices = b_indices[:, None].repeat(jumps, axis=1).reshape(batch_size*jumps)\n state_indices = (t_indices[:, None] + np.arange(jumps)[None, :]).reshape(batch_size * jumps)\n censor_before = np.zeros_like(state_indices)\n\n assert len(state_indices) == batch_size*jumps\n assert len(b_indices) == batch_size*jumps\n transition_elements = self.get_transition_elements(batch_size)\n\n # shape: horizon X batch_size*jumps\n # Offset by one; a `d\n trajectory_indices = (np.arange(-1, self._update_horizon - 1)[:, None] +\n state_indices[None, :]) % self._replay_length\n trajectory_b_indices = b_indices[None].repeat(self._update_horizon, axis=0)\n trajectory_terminals = self._store[\"terminal\"][trajectory_indices,\n trajectory_b_indices]\n trajectory_terminals[0, :] = 0\n is_terminal_transition = trajectory_terminals.any(0)\n valid_mask = (1 - trajectory_terminals).cumprod(0)\n trajectory_discount_vector = valid_mask * self._cumulative_discount_vector[:, None]\n trajectory_rewards = self._store['reward'][(trajectory_indices + 1) % self._replay_length,\n trajectory_b_indices]\n returns = np.sum(trajectory_discount_vector * trajectory_rewards, axis=0)\n\n next_indices = (state_indices + self._update_horizon) % self._replay_length\n outputs = []\n dict_out = {}\n for element in transition_elements:\n name = element.name\n if name == 'state':\n output = self.parallel_get_stack(\"observation\",\n state_indices,\n b_indices,\n censor_before,)\n output = self.restore_leading_dims(batch_size, jumps, output)\n elif name == 'return':\n # compute the discounted sum of rewards in the trajectory.\n output = returns\n output = self.restore_leading_dims(batch_size, jumps, output)\n elif name == 'reward':\n # compute the discounted sum of rewards in the trajectory.\n output = self._store[\"reward\"][state_indices, b_indices]\n output = self.restore_leading_dims(batch_size, jumps, output)\n elif name == 'next_state':\n output = self.parallel_get_stack(\"observation\",\n next_indices, b_indices,\n censor_before,)\n output = self.restore_leading_dims(batch_size, jumps, output)\n elif name == \"same_trajectory\":\n output = self._store[\"terminal\"][state_indices, b_indices]\n output = self.restore_leading_dims(batch_size, jumps, output)\n output[0, :] = 0\n output = (1 - output).cumprod(1)\n elif name in ('next_action', 'next_reward'):\n output = self._store[name.lstrip('next_')][next_indices, b_indices]\n output = self.restore_leading_dims(batch_size, jumps, output)\n elif element.name == 'terminal':\n output = is_terminal_transition\n output = self.restore_leading_dims(batch_size, jumps, output)\n elif name == 'indices':\n output = self.ravel_indices(state_indices, b_indices).astype(\"int32\")\n output = self.restore_leading_dims(batch_size, jumps, output)[:, 0]\n elif name 
in self._store.keys():\n output = self._store[name][state_indices, b_indices]\n output = self.restore_leading_dims(batch_size, jumps, output)\n else:\n continue\n outputs.append(output)\n dict_out[element.name] = output\n if indices is None:\n self.old_indices = (state_indices.reshape(batch_size, jumps)[:, -1] + 1,\n b_indices.reshape(batch_size, jumps)[:, -1])\n return outputs, dict_out\n\n def get_transition_elements(self, batch_size=None, jumps=None):\n \"\"\"Returns a 'type signature' for sample_transition_batch.\n Args:\n batch_size: int, number of transitions returned. If None, the default\n batch_size will be used.\n Returns:\n signature: A namedtuple describing the method's return type signature.\n \"\"\"\n jumps = self._subseq_len if jumps is None else jumps\n batch_size = self._batch_size if batch_size is None else batch_size\n\n transition_elements = [\n ReplayElement('state', (batch_size, jumps) + self._state_shape,\n self._observation_dtype),\n ReplayElement('action', (batch_size, jumps) + self._action_shape,\n self._action_dtype),\n ReplayElement('reward', (batch_size, jumps) + self._reward_shape,\n self._reward_dtype),\n ReplayElement('return', (batch_size, jumps) + self._reward_shape,\n self._reward_dtype),\n # ReplayElement('next_state', (batch_size, jumps) + self._state_shape,\n # self._observation_dtype),\n ReplayElement('next_action', (batch_size, jumps) + self._action_shape,\n self._action_dtype),\n ReplayElement('next_reward', (batch_size, jumps) + self._reward_shape,\n self._reward_dtype),\n ReplayElement('terminal', (batch_size, jumps), self._terminal_dtype),\n ReplayElement('same_trajectory', (batch_size, jumps), self._terminal_dtype),\n ReplayElement('indices', (batch_size,), np.int32)\n ]\n for element in self._extra_storage_types:\n transition_elements.append(\n ReplayElement(element.name, (batch_size, jumps) + tuple(element.shape),\n element.type))\n return transition_elements\n\n def _generate_filename(self, checkpoint_dir, name, suffix):\n return os.path.join(checkpoint_dir, '{}_ckpt.{}.gz'.format(name, suffix))\n\n def _return_checkpointable_elements(self):\n \"\"\"Return the dict of elements of the class for checkpointing.\n Returns:\n checkpointable_elements: dict containing all non private (starting with\n _) members + all the arrays inside self._store.\n \"\"\"\n checkpointable_elements = {}\n for member_name, member in self.__dict__.items():\n if member_name == '_store':\n for array_name, array in self._store.items():\n checkpointable_elements[STORE_FILENAME_PREFIX + array_name] = array\n elif not member_name.startswith('_'):\n checkpointable_elements[member_name] = member\n return checkpointable_elements\n\n def save(self, checkpoint_dir, iteration_number):\n \"\"\"Save the OutOfGraphReplayBuffer attributes into a file.\n This method will save all the replay buffer's state in a single file.\n Args:\n checkpoint_dir: str, the directory where numpy checkpoint files should be\n saved.\n iteration_number: int, iteration_number to use as a suffix in naming\n numpy checkpoint files.\n \"\"\"\n if not tf.io.gfile.exists(checkpoint_dir):\n return\n\n checkpointable_elements = self._return_checkpointable_elements()\n\n for attr in checkpointable_elements:\n filename = self._generate_filename(checkpoint_dir, attr, iteration_number)\n with tf.io.gfile.GFile(filename, 'wb') as f:\n with gzip.GzipFile(fileobj=f) as outfile:\n # Checkpoint the np arrays in self._store with np.save instead of\n # pickling the dictionary is critical for file size and performance.\n # 
STORE_FILENAME_PREFIX indicates that the variable is contained in\n # self._store.\n if attr.startswith(STORE_FILENAME_PREFIX):\n array_name = attr[len(STORE_FILENAME_PREFIX):]\n np.save(outfile, self._store[array_name], allow_pickle=False)\n # Some numpy arrays might not be part of storage\n elif isinstance(self.__dict__[attr], np.ndarray):\n np.save(outfile, self.__dict__[attr], allow_pickle=False)\n else:\n pickle.dump(self.__dict__[attr], outfile)\n\n # After writing a checkpoint file, we garbage collect the checkpoint file\n # that is four versions old.\n stale_iteration_number = iteration_number - CHECKPOINT_DURATION\n if stale_iteration_number >= 0:\n stale_filename = self._generate_filename(checkpoint_dir, attr,\n stale_iteration_number)\n try:\n tf.io.gfile.remove(stale_filename)\n except tf.errors.NotFoundError:\n pass\n\n def load(self, checkpoint_dir, suffix):\n \"\"\"Restores the object from bundle_dictionary and numpy checkpoints.\n Args:\n checkpoint_dir: str, the directory where to read the numpy checkpointed\n files from.\n suffix: str, the suffix to use in numpy checkpoint files.\n Raises:\n NotFoundError: If not all expected files are found in directory.\n \"\"\"\n save_elements = self._return_checkpointable_elements()\n # We will first make sure we have all the necessary files available to avoid\n # loading a partially-specified (i.e. corrupted) replay buffer.\n for attr in save_elements:\n filename = self._generate_filename(checkpoint_dir, attr, suffix)\n if not tf.io.gfile.exists(filename):\n raise tf.errors.NotFoundError(None, None,\n 'Missing file: {}'.format(filename))\n # If we've reached this point then we have verified that all expected files\n # are available.\n for attr in save_elements:\n filename = self._generate_filename(checkpoint_dir, attr, suffix)\n with tf.io.gfile.GFile(filename, 'rb') as f:\n with gzip.GzipFile(fileobj=f) as infile:\n if attr.startswith(STORE_FILENAME_PREFIX):\n array_name = attr[len(STORE_FILENAME_PREFIX):]\n self._store[array_name] = np.load(infile, allow_pickle=False)\n elif isinstance(self.__dict__[attr], np.ndarray):\n self.__dict__[attr] = np.load(infile, allow_pickle=False)\n else:\n self.__dict__[attr] = pickle.load(infile)\n\n\nclass PrioritizedJaxSubsequenceParallelEnvReplayBuffer(\n JaxSubsequenceParallelEnvReplayBuffer):\n \"\"\"Deterministic version of prioritized replay buffer.\"\"\"\n\n def __init__(self,\n observation_shape,\n stack_size,\n replay_capacity,\n batch_size,\n update_horizon=1,\n subseq_len=0,\n n_envs=1,\n gamma=0.99,\n max_sample_attempts=1000,\n extra_storage_types=None,\n observation_dtype=np.uint8,\n terminal_dtype=np.uint8,\n action_shape=(),\n action_dtype=np.int32,\n reward_shape=(),\n reward_dtype=np.float32):\n super().__init__(\n observation_shape=observation_shape,\n stack_size=stack_size,\n replay_capacity=replay_capacity,\n batch_size=batch_size,\n update_horizon=update_horizon,\n gamma=gamma,\n max_sample_attempts=max_sample_attempts,\n extra_storage_types=extra_storage_types,\n observation_dtype=observation_dtype,\n terminal_dtype=terminal_dtype,\n subseq_len=subseq_len,\n n_envs=n_envs,\n action_shape=action_shape,\n action_dtype=action_dtype,\n reward_shape=reward_shape,\n reward_dtype=reward_dtype)\n\n self.sum_tree = sum_tree.DeterministicSumTree(replay_capacity)\n\n def get_add_args_signature(self):\n \"\"\"The signature of the add function.\"\"\"\n parent_add_signature = super().get_add_args_signature()\n add_signature = parent_add_signature + [\n ReplayElement('priority', (), 
np.float32)\n ]\n return add_signature\n\n def _add(self, *args):\n \"\"\"Internal add method to add to the underlying memory arrays.\"\"\"\n self._check_args_length(*args)\n\n # Use Schaul et al.'s (2015) scheme of setting the priority of new elements\n # to the maximum priority so far.\n # Picks out 'priority' from arguments and adds it to the sum_tree.\n transition = {}\n for i, element in enumerate(self.get_add_args_signature()):\n if element.name == 'priority':\n priority = args[i]\n else:\n transition[element.name] = args[i]\n\n indices = np.ravel_multi_index((np.ones((1,), dtype=\"int32\")*self.cursor(),\n np.arange(self.n_envs)),\n (self._replay_length, self.n_envs))\n\n [self.sum_tree.set(indices[i], priority[i]) for i in range(len(indices))]\n super()._add_transition(transition)\n\n def sample_index_batch(self, batch_size):\n \"\"\"Returns a batch of valid indices sampled as in Schaul et al. (2015).\"\"\"\n # Sample stratified indices. Some of them might be invalid.\n # start = time.time()\n indices = self.sum_tree.stratified_sample(batch_size, self._rng)\n indices = np.array(indices)\n # print(\"Sampling from sum tree took {}\".format(time.time() - start))\n allowed_attempts = self._max_sample_attempts\n\n t_indices, b_indices = self.unravel_indices(indices)\n censor_before = np.zeros_like(t_indices)\n for i in range(len(indices)):\n is_valid = self.is_valid_transition(t_indices[i].item(), b_indices[i].item())\n ep_start = self.is_ep_start(t_indices[i:i+1], b_indices[i:i+1])\n censor_before[i] = ep_start\n if not is_valid:\n if allowed_attempts == 0:\n raise RuntimeError(\n 'Max sample attempts: Tried {} times but only sampled {}'\n ' valid indices. Batch size is {}'.\n format(self._max_sample_attempts, i, batch_size))\n while (not is_valid) and allowed_attempts > 0:\n # If index i is not valid keep sampling others. Note that this\n # is not stratified.\n self._rng, rng = jax.random.split(self._rng)\n index = int(self.sum_tree.sample(rng=rng))\n t_index, b_index = self.unravel_indices(index)\n allowed_attempts -= 1\n t_indices[i] = t_index\n b_indices[i] = b_index\n is_valid = self.is_valid_transition(t_indices[i].item(), b_indices[i].item())\n ep_start = self.is_ep_start(t_indices[i:i+1], b_indices[i:i+1])\n censor_before[i] = ep_start\n return t_indices, b_indices, censor_before\n\n def sample_transition_batch(self, rng, batch_size=None, indices=None):\n \"\"\"Returns a batch of transitions with extra storage and the priorities.\"\"\"\n transition = super().sample_transition_batch(rng, batch_size, indices)\n transition.append(self.get_priority(transition[-1]))\n # import ipdb; ipdb.set_trace()\n # transition_elements = self.get_transition_elements(batch_size)\n # transition_names = [e.name for e in transition_elements]\n # probabilities_index = transition_names.index('sampling_probabilities')\n # indices_index = transition_names.index('indices')\n # indices = transition[indices_index]\n # # The parent returned an empty array for the probabilities. 
Fill it with the\n # # contents of the sum tree.\n # import ipdb; ipdb.set_trace()\n # transition[probabilities_index][:] = self.get_priority(indices)\n return transition\n\n def set_priority(self, indices, priorities):\n \"\"\"Sets the priority of the given elements according to Schaul et al.\"\"\"\n assert indices.dtype == np.int32, ('Indices must be integers, '\n 'given: {}'.format(indices.dtype))\n for index, priority in zip(indices, priorities):\n self.sum_tree.set(index, priority)\n\n def get_priority(self, indices):\n \"\"\"Fetches the priorities correspond to a batch of memory indices.\"\"\"\n assert indices.shape, 'Indices must be an array.'\n assert indices.dtype == np.int32, ('Indices must be int32s, '\n 'given: {}'.format(indices.dtype))\n priority_batch = self.sum_tree.get(indices)\n return priority_batch\n\n def get_transition_elements(self, batch_size=None):\n \"\"\"Returns a 'type signature' for sample_transition_batch.\"\"\"\n parent_transition_type = (\n super().get_transition_elements(batch_size))\n probablilities_type = [\n ReplayElement('sampling_probabilities', (batch_size,), np.float32)\n ]\n return parent_transition_type + probablilities_type\n"
] |
[
[
"numpy.arange",
"numpy.empty",
"numpy.ones",
"numpy.save",
"numpy.ravel_multi_index",
"numpy.zeros_like",
"numpy.moveaxis",
"numpy.load",
"numpy.array",
"numpy.unravel_index",
"numpy.sum",
"numpy.zeros"
]
] |
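The replay-buffer cell above documents `invalid_range` with the formula "c - N, ..., c + K - 1" for cursor c, horizon N, and stack size K. The worked example below restates that helper verbatim and evaluates it for concrete numbers to show the circular wrap-around the docstring describes.

```python
import numpy as np

def invalid_range(cursor, replay_capacity, stack_size, update_horizon):
    # Same formula as the docstring above: the update_horizon indices before the
    # cursor plus the stack_size indices from the cursor onwards, modulo capacity.
    return np.array([(cursor - update_horizon + i) % replay_capacity
                     for i in range(stack_size + update_horizon)])

# capacity 10, cursor at 1, stack_size 3, 2-step horizon: the blocked window
# wraps around the circular buffer.
print(invalid_range(cursor=1, replay_capacity=10, stack_size=3, update_horizon=2))
# -> [9 0 1 2 3]
```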
judejeh/rom-comma
|
[
"2cace7c4d9d72a35237bc7ddc0f54aec3b9b1d63"
] |
[
"archived - prd/archive/test_functions_rbf_3.py"
] |
[
"# BSD 3-Clause License\n#\n# Copyright (c) 2019, Robert A. Milton\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\" Run this module first thing, to test your installation of romcomma.\n\n**Contents**:\n **predict**: Prediction using a GaussianBundle.\n\n **test_input**: A rudimentary test input, for installation testing.\n\"\"\"\n\nfrom romcomma import distribution, function, data, model\nfrom romcomma.typing_ import NP\nfrom numpy import zeros, eye, pi, full, atleast_2d\nfrom pathlib import Path\nfrom scipy.stats import ortho_group\n\nEFFECTIVELY_ZERO = 1.0E-64\nBASE_PATH = Path('X:\\\\comma_group1\\\\Rom\\\\dat\\\\TestFunctions\\\\Scalar.RBF')\n\n\ndef scalar_function_of_normal(store_name: str, N: int, M: int, X_std: float, noise_std: float, CDF_scale: NP.Array=None, CDF_loc: NP.Array=None,\n pre_function_with_parameters: function.CallableWithParameters = None,\n function_with_parameters: function.CallableWithParameters = None) -> data.Store:\n X_marginal = distribution.Univariate('norm', loc=0, scale=X_std)\n X_dist = distribution.Multivariate.Independent(M=M, marginals=X_marginal)\n noise_dist = (distribution.Multivariate.Normal(mean=zeros(1, dtype=float), covariance=noise_std ** 2 * eye(1, dtype=float))\n if noise_std > EFFECTIVELY_ZERO else None)\n return function.sample(store_dir=store_name, N=N, X_distribution=X_dist,\n X_sample_design=distribution.SampleDesign.LATIN_HYPERCUBE, CDF_scale=CDF_scale,\n CDF_loc=CDF_loc, pre_function_with_parameters=pre_function_with_parameters,\n functions_with_parameters=function_with_parameters,\n noise_distribution=noise_dist, noise_sample_design=distribution.SampleDesign.LATIN_HYPERCUBE)\n\n\ndef run_rbfs(test_fuction: str, N: int, noise_std: bool, random: bool, M: int = 5, K: int = 2 ):\n name = 'rbf'\n kernel_parameters = model.gpy_.Kernel.ExponentialQuadratic.Parameters(lengthscale=full((1, 1), 0.2, dtype=float))\n parameters = model.gpy_.GP.DEFAULT_PARAMETERS._replace(kernel=kernel_parameters, e_floor=1E-6, e=0.003)\n CDF_scale = 2 * pi\n CDF_loc = pi\n if test_fuction == 'sin.1':\n 
function_with_parameters = function.CallableWithParameters(function.ishigami, parameters={'a': 0.0, 'b': 0.0})\n elif test_fuction == 'sin.2':\n function_with_parameters = function.CallableWithParameters(function.ishigami, parameters={'a': 2.0, 'b': 0.0})\n elif test_fuction == 'ishigami':\n function_with_parameters = function.callable_with_parameters(function.ishigami)\n else:\n CDF_scale = 1.0\n CDF_loc = 0.0\n function_with_parameters = function.callable_with_parameters(function.sobol_g)\n store_name = test_fuction + '.{0:d}.{1:.3f}.{2:d}'.format(M, noise_std, N)\n if random:\n pre_function_with_parameters = function.CallableWithParameters(function=function.linear, parameters={'matrix': ortho_group.rvs(M)})\n store_name += '.random'\n else:\n pre_function_with_parameters = None\n store_name += '.rom'\n store_name = BASE_PATH / store_name\n store = scalar_function_of_normal(store_name=store_name, N=N, M=M, X_std=1.0, noise_std=noise_std, CDF_scale=CDF_scale, CDF_loc=CDF_loc,\n pre_function_with_parameters=pre_function_with_parameters,\n function_with_parameters=function_with_parameters)\n data.Fold.into_K_folds(parent=store, K=K, shuffled_before_folding=False, standard=data.Store.Standard.mean_and_std,\n replace_empty_test_with_data_=True)\n model.run.GPs(module=model.run.Module.GPY_, name=name, store=store, M=-1, parameters=parameters, optimize=True, test=True, sobol=True)\n sobol_options = {'semi_norm': model.base.Sobol.SemiNorm.DEFAULT_META, 'N_exploit': 3, 'N_explore': 4096, 'options': {'gtol': 1.0E-12}}\n rom_options = {'iterations': 6, 'guess_identity_after_iteration': 2, 'sobol_optimizer_options': sobol_options,\n 'gp_initializer': model.base.ROM.GP_Initializer.ORIGINAL,\n 'gp_optimizer_options': model.run.Module.GPY_.value.GP.DEFAULT_OPTIMIZER_OPTIONS}\n model.run.ROMs(module=model.run.Module.GPY_, name='rom', store=store, source_gp_name=name, Mu=-1, Mx=-1, optimizer_options=rom_options)\n\n\ndef run_ards(test_fuction: str, N: int, noise_std: bool, random: bool, M: int = 5, K: int = 2 ):\n name = 'rbf'\n store_name = test_fuction + '.{0:d}.{1:.3f}.{2:d}'.format(M, noise_std, N)\n if random:\n store_name += '.random'\n else:\n store_name += '.rom'\n store = data.Store(BASE_PATH / store_name)\n model.run.GPs(module=model.run.Module.GPY_, name=name, store=store, M=-1, parameters=None, optimize=True, test=True, sobol=True,\n make_ard=True)\n\n\nif __name__ == '__main__':\n for M in (5, ):\n # for M in (5, 10, 15):\n for N in (200, 400, 800, 1600, 3200):\n for random in (False, True):\n for noise_std in (0.005, 0.05):\n # for noise_std in (0, 0.001, 0.005, 0.01, 0.025, 0.05, 0.1):\n run_rbfs(\"ishigami\", N, noise_std, random, M)\n"
] |
[
[
"scipy.stats.ortho_group.rvs",
"numpy.eye",
"numpy.zeros",
"numpy.full"
]
] |
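The rom-comma test script above pre-mixes its inputs with a random orthogonal matrix drawn from `scipy.stats.ortho_group`. The short sketch below, with an assumed seed, shows what such a draw gives and why it only rotates the input space.

```python
import numpy as np
from scipy.stats import ortho_group

M = 5  # matches the default input dimension in the script above
Q = ortho_group.rvs(M, random_state=0)

# Orthogonality check: Q @ Q.T is the identity up to floating-point error,
# so the mixing rotates directions without distorting scales.
assert np.allclose(Q @ Q.T, np.eye(M))
print(Q.shape)  # (5, 5)
```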
Ostirion-net/ppscore-time_series
|
[
"4292741934b39a94eb3faf3b111a02a2e9afb36a"
] |
[
"tests/test_calculation.py"
] |
[
"# # -*- coding: utf-8 -*-\n\nimport pytest\nimport pandas as pd\nimport numpy as np\n\nimport ppscore as pps\n\n\ndef test__normalized_f1_score():\n from ppscore.calculation import _normalized_f1_score\n\n assert _normalized_f1_score(0.4, 0.5) == 0\n assert _normalized_f1_score(0.75, 0.5) == 0.5\n\n\ndef test__normalized_mae_score():\n from ppscore.calculation import _normalized_mae_score\n\n assert _normalized_mae_score(10, 5) == 0\n assert _normalized_mae_score(5, 10) == 0.5\n\n\ndef test__determine_case_and_prepare_df():\n from ppscore.calculation import _determine_case_and_prepare_df\n\n df = pd.read_csv(\"examples/titanic.csv\")\n df = df.rename(\n columns={\n \"Age\": \"Age_float\",\n \"Pclass\": \"Pclass_integer\",\n \"Survived\": \"Survived_integer\",\n \"Ticket\": \"Ticket_object\",\n \"Name\": \"Name_object_id\",\n }\n )\n\n df[\"x\"] = 1 # x is irrelevant for this test\n df[\"constant\"] = 1\n df[\"Pclass_category\"] = df[\"Pclass_integer\"].astype(\"category\")\n df[\"Pclass_datetime\"] = pd.to_datetime(\n df[\"Pclass_integer\"], infer_datetime_format=True\n )\n df[\"Survived_boolean\"] = df[\"Survived_integer\"].astype(bool)\n df[\"Cabin_string\"] = pd.Series(df[\"Cabin\"].apply(str), dtype=\"string\")\n\n # check regression\n assert _determine_case_and_prepare_df(df, \"x\", \"Age_float\")[1] == \"regression\"\n assert _determine_case_and_prepare_df(df, \"x\", \"Pclass_integer\")[1] == \"regression\"\n\n # check classification\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"Pclass_category\")[1]\n == \"classification\"\n )\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"Survived_boolean\")[1]\n == \"classification\"\n )\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"Ticket_object\")[1] == \"classification\"\n )\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"Cabin_string\")[1] == \"classification\"\n )\n\n # check special cases\n assert (\n _determine_case_and_prepare_df(df, \"Name_object_id\", \"x\")[1] == \"feature_is_id\"\n )\n assert _determine_case_and_prepare_df(df, \"x\", \"x\")[1] == \"predict_itself\"\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"constant\")[1] == \"target_is_constant\"\n )\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"Name_object_id\")[1] == \"target_is_id\"\n )\n assert (\n _determine_case_and_prepare_df(df, \"x\", \"Pclass_datetime\")[1]\n == \"target_is_datetime\"\n )\n\n\ndef test__maybe_sample():\n from ppscore.calculation import _maybe_sample\n\n df = pd.read_csv(\"examples/titanic.csv\")\n assert len(_maybe_sample(df, 10)) == 10\n\n\ndef test_score():\n df = pd.DataFrame()\n df[\"x\"] = np.random.uniform(-2, 2, 1_000)\n df[\"error\"] = np.random.uniform(-0.5, 0.5, 1_000)\n df[\"y\"] = df[\"x\"] * df[\"x\"] + df[\"error\"]\n\n df[\"constant\"] = 1\n df = df.reset_index()\n df[\"id\"] = df[\"index\"].astype(str)\n\n df[\"x_greater_0_boolean\"] = df[\"x\"] > 0\n # df[\"x_greater_0_string\"] = df[\"x_greater_0_boolean\"].astype(str)\n df[\"x_greater_0_string\"] = pd.Series(\n df[\"x_greater_0_boolean\"].apply(str), dtype=\"string\"\n )\n df[\"x_greater_0_string_object\"] = df[\"x_greater_0_string\"].astype(\"object\")\n df[\"x_greater_0_string_category\"] = df[\"x_greater_0_string\"].astype(\"category\")\n\n df[\"x_greater_0_boolean_object\"] = df[\"x_greater_0_boolean\"].astype(\"object\")\n df[\"x_greater_0_boolean_category\"] = df[\"x_greater_0_boolean\"].astype(\"category\")\n\n df[\"nan\"] = np.nan\n\n duplicate_column_names_df = pd.DataFrame()\n duplicate_column_names_df[\"x1\"] = 
np.random.uniform(-2, 2, 10)\n duplicate_column_names_df[\"x2\"] = np.random.uniform(-2, 2, 10)\n duplicate_column_names_df[\"unique_column_name\"] = np.random.uniform(-2, 2, 10)\n duplicate_column_names_df.columns = [\n \"duplicate_column_name\",\n \"duplicate_column_name\",\n \"unique_column_name\",\n ]\n\n # check input types\n with pytest.raises(TypeError):\n numpy_array = np.random.randn(10, 10) # not a DataFrame\n pps.score(numpy_array, \"x\", \"y\")\n\n with pytest.raises(ValueError):\n pps.score(df, \"x_column_that_does_not_exist\", \"y\")\n\n with pytest.raises(ValueError):\n pps.score(df, \"x\", \"y_column_that_does_not_exist\")\n\n with pytest.raises(AttributeError):\n # the task argument is not supported any more\n pps.score(df, \"x\", \"y\", task=\"classification\")\n\n with pytest.raises(AssertionError):\n # df shall not have duplicate column names\n pps.score(\n duplicate_column_names_df, \"duplicate_column_name\", \"unique_column_name\"\n )\n\n with pytest.raises(AssertionError):\n # df shall not have duplicate column names\n pps.score(\n duplicate_column_names_df, \"unique_column_name\", \"duplicate_column_name\"\n )\n\n # check cross_validation\n # if more folds than data, there is an error\n with pytest.raises(ValueError):\n assert pps.score(df, \"x\", \"y\", cross_validation=2000, catch_errors=False)\n\n # check random_seed\n assert pps.score(df, \"x\", \"y\", random_seed=1) == pps.score(\n df, \"x\", \"y\", random_seed=1\n )\n assert pps.score(df, \"x\", \"y\", random_seed=1) != pps.score(\n df, \"x\", \"y\", random_seed=2\n )\n # the random seed that is drawn automatically is smaller than <1000\n assert pps.score(df, \"x\", \"y\") != pps.score(df, \"x\", \"y\", random_seed=123_456)\n\n # check invalid_score\n invalid_score = -99\n assert (\n pps.score(df, \"nan\", \"y\", invalid_score=invalid_score)[\"ppscore\"]\n == invalid_score\n )\n\n # check catch_errors using the cross_validation error from above\n assert pps.score(df, \"x\", \"y\", cross_validation=2000, invalid_score=invalid_score, catch_errors=True)[\"ppscore\"] == invalid_score\n\n # check case discrimination\n assert pps.score(df, \"x\", \"y\")[\"case\"] == \"regression\"\n assert pps.score(df, \"x\", \"x_greater_0_string\")[\"case\"] == \"classification\"\n assert pps.score(df, \"x\", \"constant\")[\"case\"] == \"target_is_constant\"\n assert pps.score(df, \"x\", \"x\")[\"case\"] == \"predict_itself\"\n assert pps.score(df, \"x\", \"id\")[\"case\"] == \"target_is_id\"\n assert pps.score(df, \"nan\", \"y\")[\"case\"] == \"empty_dataframe_after_dropping_na\"\n\n # check scores\n # feature is id\n assert pps.score(df, \"id\", \"y\")[\"ppscore\"] == 0\n\n # numeric feature and target\n assert pps.score(df, \"x\", \"y\")[\"ppscore\"] > 0.5\n assert pps.score(df, \"y\", \"x\")[\"ppscore\"] < 0.05\n\n # boolean feature or target\n assert pps.score(df, \"x\", \"x_greater_0_boolean\")[\"ppscore\"] > 0.6\n assert pps.score(df, \"x_greater_0_boolean\", \"x\")[\"ppscore\"] < 0.6\n\n # string feature or target\n assert pps.score(df, \"x\", \"x_greater_0_string\")[\"ppscore\"] > 0.6\n assert pps.score(df, \"x_greater_0_string\", \"x\")[\"ppscore\"] < 0.6\n\n # object feature or target\n assert pps.score(df, \"x\", \"x_greater_0_string_object\")[\"ppscore\"] > 0.6\n assert pps.score(df, \"x_greater_0_string_object\", \"x\")[\"ppscore\"] < 0.6\n\n # category feature or target\n assert pps.score(df, \"x\", \"x_greater_0_string_category\")[\"ppscore\"] > 0.6\n assert pps.score(df, \"x_greater_0_string_category\", 
\"x\")[\"ppscore\"] < 0.6\n\n # object feature or target\n assert pps.score(df, \"x\", \"x_greater_0_boolean_object\")[\"ppscore\"] > 0.6\n assert pps.score(df, \"x_greater_0_boolean_object\", \"x\")[\"ppscore\"] < 0.6\n\n # category feature or target\n assert pps.score(df, \"x\", \"x_greater_0_boolean_category\")[\"ppscore\"] > 0.6\n assert pps.score(df, \"x_greater_0_boolean_category\", \"x\")[\"ppscore\"] < 0.6\n\n\ndef test_predictors():\n y = \"Survived\"\n df = pd.read_csv(\"examples/titanic.csv\")\n df = df[[\"Age\", y]]\n\n duplicate_column_names_df = pd.DataFrame()\n duplicate_column_names_df[\"x1\"] = np.random.uniform(-2, 2, 10)\n duplicate_column_names_df[\"x2\"] = np.random.uniform(-2, 2, 10)\n duplicate_column_names_df[\"unique_column_name\"] = np.random.uniform(-2, 2, 10)\n duplicate_column_names_df.columns = [\n \"duplicate_column_name\",\n \"duplicate_column_name\",\n \"unique_column_name\",\n ]\n\n # check input types\n with pytest.raises(TypeError):\n numpy_array = np.random.randn(10, 10) # not a DataFrame\n pps.predictors(numpy_array, y)\n\n with pytest.raises(ValueError):\n pps.predictors(df, \"y_column_that_does_not_exist\")\n\n with pytest.raises(ValueError):\n pps.predictors(df, y, output=\"invalid_output_type\")\n\n with pytest.raises(ValueError):\n pps.predictors(df, y, sorted=\"invalid_value_for_sorted\")\n\n with pytest.raises(AssertionError):\n # df shall not have duplicate column names\n pps.predictors(duplicate_column_names_df, \"duplicate_column_name\")\n\n # check return types\n result_df = pps.predictors(df, y)\n assert isinstance(result_df, pd.DataFrame)\n assert not y in result_df.index\n\n list_of_dicts = pps.predictors(df, y, output=\"list\")\n assert isinstance(list_of_dicts, list)\n assert isinstance(list_of_dicts[0], dict)\n\n # the underlying calculations are tested as part of test_score\n\n\ndef test_matrix():\n df = pd.read_csv(\"examples/titanic.csv\")\n df = df[[\"Age\", \"Survived\"]]\n df[\"Age_datetime\"] = pd.to_datetime(df[\"Age\"], infer_datetime_format=True)\n subset_df = df[[\"Survived\", \"Age_datetime\"]]\n\n # check input types\n with pytest.raises(TypeError):\n numpy_array = np.random.randn(10, 10) # not a DataFrame\n pps.matrix(numpy_array)\n\n with pytest.raises(ValueError):\n pps.matrix(df, output=\"invalid_output_type\")\n\n # check return types\n assert isinstance(pps.matrix(df), pd.DataFrame)\n assert isinstance(pps.matrix(df, output=\"list\"), list)\n\n # matrix catches single score errors under the hood\n invalid_score = [\n score\n for score in pps.matrix(subset_df, output=\"list\")\n if (score[\"x\"] == \"Survived\" and score[\"y\"] == \"Age_datetime\")\n ][0]\n assert invalid_score[\"ppscore\"] == 0\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.random.uniform"
]
] |
magnusoy/Sparkie
|
[
"428b716a50cd0c274670971ee007e82571a04a80"
] |
[
"python/src/deprecated/vision/depth_camera.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\n__author__ = \"Magnus Kvendseth Øye\"\n__copyright__ = \"Copyright 2020, Sparkie Quadruped Robot\"\n__credits__ = [\"Magnus Kvendseth Øye\", \"Petter Drønnen\", \"Vegard Solheim\"]\n__version__ = \"1.0.0\"\n__license__ = \"MIT\"\n__maintainer__ = \"Magnus Kvendseth Øye\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n\"\"\"\n\nimport cv2\nimport time\nimport numpy as np\nfrom enum import IntEnum\nimport pyrealsense2 as rs\n\n# Importing from local source\nfrom communication.publisher import Publisher\n\n\nclass Preset(IntEnum):\n Custom = 0\n Default = 1\n Hand = 2\n HighAccuracy = 3\n HighDensity = 4\n MediumDensity = 5\n\nclass DepthCamera(Publisher):\n\n def __init__(self, color, ip, port, topic, interval):\n Publisher.__init__(self, ip, port, topic)\n self.interval = interval\n self.lastUpdate = self.millis(self)\n\n pipe = rs.pipeline()\n cfg = rs.config()\n cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n profile = pipe.start(cfg)\n depth_sensor = profile.get_device().first_depth_sensor()\n depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)\n \n\n self.running = True\n self.color = color\n \n zero_vec = (0.0, 0.0, 0.0)\n self.depth_frame = zero_vec\n self.color_frame = zero_vec\n self.depth_colormap = zero_vec\n\n # Processing blocks\n self.pc = rs.pointcloud()\n self.decimate = rs.decimation_filter()\n #self.decimate.set_option(rs.option.filter_magnitude, 2 ** self.decimate)\n self.colorizer = rs.colorizer()\n \n @staticmethod\n def millis(self):\n \"\"\"Returns the current time in milliseconds\n Returns\n -------\n current time in milliseconds\n \"\"\"\n\n return int(round(time.time() * 1000))\n \n def poll(self):\n try:\n frames = self.pipe.wait_for_frames()\n self.depth_frame = frames.get_depth_frame()\n self.color_frame = frames.get_color_frame()\n color_img = np.asanyarray(self.color_frame.get_data())\n self.depth_colormap = np.asanyarray(self.colorizer.colorize(self.depth_frame).get_data())\n \n if self.color:\n mapped_frame, color_source = self.color_frame, color_img\n else:\n mapped_frame, color_source = self.depth_frame, self.depth_colormap\n \n points = self.pc.calculate(self.depth_frame)\n self.pc.map_to(mapped_frame)\n \n v, t = points.get_vertices(), points.get_texture_coordinates()\n verts = np.asanyarray(v).view(np.float32).reshape(-1, 3) # xyz\n texcoords = np.asanyarray(t).view(np.float32).reshape(-1, 2) # uv\n\n \n #depth_frame = self.decimate.process(depth_frame)\n except Exception as e:\n print(e)\n return\n \n def run(self):\n\n self.initialize()\n try:\n while self.running:\n #now = self.millis(self)\n #timeDifference = now - self.lastUpdate\n #if timeDifference >= self.interval:\n self.poll()\n self.publish_depth_frame()\n self.publish_color_frame()\n self.publish_depth_colormap()\n time.sleep(self.interval//1000)\n # self.lastUpdate = now\n finally:\n self.pipe.stop()\n \n def publish_depth_frame(self):\n Publisher.topic = 'depth'\n self.send(self.depth_frame)\n \n def publish_color_frame(self):\n Publisher.topic = 'img'\n self.send(self.color_frame)\n \n def publish_depth_colormap(self):\n Publisher.topic = 'colormap'\n self.send(self.depth_colormap)\n \n\nif __name__ == \"__main__\":\n pass\n #dc = DepthCamera()\n #while True:\n # dc.poll()\n"
] |
[
[
"numpy.asanyarray"
]
] |
wpv1999/twitchslam
|
[
"c52a14fe1034426b6dfc1e6222984a9a06e40b6a"
] |
[
"optimize_crappy.py"
] |
[
"from helpers import add_ones\n#import autograd.numpy as np\nfrom scipy.optimize import least_squares, leastsq\nimport cv2\n\nimport numpy as np\nimport sympy as sp\nfrom sympy.utilities.autowrap import autowrap, ufuncify, binary_function\nfrom sympy.printing.ccode import ccode\n\nEPS = 1e-10\n\ndef rotation_from_matrix(R):\n return cv2.Rodrigues(R)[0].flatten()\n\ndef rotation_to_matrix(w):\n wx,wy,wz = w\n theta = sp.sqrt(wx**2 + wy**2 + wz**2 + wy**2 + wz**2) + EPS\n omega = sp.Matrix([[0,-wz,wy],\n [wz,0,-wx],\n [-wy,wx,0]])\n R = sp.eye(3) +\\\n omega*(sp.sin(theta)/theta) +\\\n (omega*omega)*((1-sp.cos(theta))/(theta*theta))\n return R\n\n\"\"\"\n# test these\nassert(np.allclose(np.eye(3), rotation_to_matrix(np.array([0,0,0]))))\nfor i in range(20):\n w = np.random.randn(3)\n what = rotation_from_matrix(rotation_to_matrix(w))\n assert(np.allclose(w, what))\n\"\"\"\n\ndef optimize(frames, points, *args):\n # get point location guesses + camera poses (initial parameter vector)\n x0 = []\n for p in points:\n x0.append(p.pt)\n for f in frames:\n t = f.pose[:3, 3]\n R = f.pose[:3, :3]\n w = rotation_from_matrix(R)\n x0.append(t)\n x0.append(w)\n x0 = np.array(x0).flatten()\n\n # get target residuals (measurement vector)\n uvs = []\n for p in points:\n for f, idx in zip(p.frames, p.idxs):\n uv = f.kps[idx]\n uvs.append(uv)\n b = np.array(uvs).flatten()\n\n # f(ptw(9)) -> uv(2)\n def proj(p, t, w):\n R = rotation_to_matrix(w)\n proj = (R * p)+t\n return (proj[0] / proj[2], proj[1] / proj[2])\n\n def get_symbolic_jacobians():\n p = sp.Matrix(sp.symbols(\"px py pz\"))\n t = sp.Matrix(sp.symbols(\"tx ty tz\"))\n w = sp.Matrix(sp.symbols(\"wx wy wz\"))\n uv = sp.Matrix(proj(p, t, w))\n fuv = autowrap(uv)\n fjp = autowrap(uv.jacobian(p))\n fjt = autowrap(uv.jacobian(t))\n fjw = autowrap(uv.jacobian(w))\n return fuv,fjp,fjt,fjw\n fuv,fjp,fjt,fjw = get_symbolic_jacobians()\n\n # compute residuals f(x) = b'\n def res(x):\n J = np.zeros((b.shape[0], x0.shape[0]))\n ret = []\n j = 0\n for i, p in enumerate(points):\n for f, idx in zip(p.frames, p.idxs):\n pt = x[i*3:(i+1)*3]\n fidx = len(points)*3 + f.id*6\n tw = x[fidx:fidx+6]\n ptw = np.concatenate([pt, tw], axis=0).tolist()\n\n uv = fuv(*ptw)\n J[j*2:(j+1)*2, i*3:(i+1)*3] = fjp(*ptw)\n J[j*2:(j+1)*2, fidx:fidx+3] = fjt(*ptw)\n J[j*2:(j+1)*2, fidx+3:fidx+6] = fjw(*ptw)\n\n j += 1\n ret.append(uv)\n return np.array(ret).flatten(), J\n\n bhat, J = res(x0)\n print(J)\n print(J.shape)\n print(np.sum((bhat-b)**2))\n \n exit(0)\n\n # TODO: actually do this\n # http://www.cs.technion.ac.il/users/wwwb/cgi-bin/tr-get.cgi/2014/MSC/MSC-2014-16.pdf page 17\n # http://www.telesens.co/2016/10/13/bundle-adjustment-part-1-jacobians/ defines 2x3 jacobian\n\n # define function fun(parameter) = measurement\n\n def fun(x):\n return np.sum((res(x)-b)**2)\n\n # stack poses\n grad_fun = grad(fun)\n print(\"computing at x0\")\n\n # gradient descent\n for i in range(20):\n loss = fun(x0)\n d = grad_fun(x0)\n print(loss, d)\n x0 -= d\n\n \"\"\"\n poses = []\n for i, p in enumerate(self.points):\n for f, idx in zip(p.frames, p.idxs):\n poses.append(f.pose[:3])\n poses = np.concatenate(poses, axis=1)\n print(poses.shape)\n\n loss = np.dot(poses, x0)\n print(loss)\n \"\"\"\n\n print(\"running least squares with %d params\" % len(x0))\n \n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
AparCode/mask_identifier
|
[
"44abfcd686c4c39eb2f595ab425e41bd6574611a"
] |
[
"show_finished_image.py"
] |
[
"import cv2\nimport detect_faces as data\nimport numpy as np\nimport cnn_model_setup as cm\nimport torch\nimport model as m\nimport social_distancing as sd\n\ndef convert_image(image, model, landmarks, bounding_boxes, resized_crop, bgr=True, resize=True):\n \"\"\"Uses faces found from the detect_faces file and a trained convolutional neural network model to draw green boxes \n around the faces with masks on, and draw red boxes around the faces without masks on.\n If desired, resize the original image to be of height 1000 px or width 1000 px for easier processing.\n Parameters:\n -----------\n image: np.ndarray, describes image to be displayed\n model: Model, trained model that will predict mask/no mask category for each face\n bounding_boxes: list, list of coordinates for each face (returned by data.find_faces)\n resized_crop: list, list of faces in image, (returned by data.find_faces)\n bgr: boolean (optional), True = bgr images, need to be converted (cv2.readimg was used)\n False = rgb image, does not need to be converted\n resize: boolean (optional), True = resize image to be of height/width 1000px if original\n image had height/width smaller than 500px\n False = don't resize image\n Returns:\n --------\n np.ndarray\n Describes image that now has boxes around faces and text describing mask/no mask\n \"\"\"\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n height = image.shape[0]\n width = image.shape[1]\n \n if resize and (height < 500 or width < 500):\n if height > width:\n sf = 1000 / height\n else:\n sf = 1000 / width\n else:\n sf = 1\n\n if bgr:\n red = (255, 0, 0)\n else:\n red = (0, 0, 255)\n\n image = cv2.resize(image, (np.rint(width*sf).astype(np.int), np.rint(height*sf).astype(np.int)))\n bounding_boxes = np.rint(bounding_boxes*sf).astype(np.int)\n landmarks = np.rint(landmarks*sf).astype(np.int)\n\n convertedOne, convertedTwo = m.convert_data(resized_crop)\n converted = np.append(convertedOne, convertedTwo, axis=0)\n\n preds = model(torch.Tensor(converted).to(device))\n preds = np.argmax(preds.data, axis=1)\n num_wearing_masks = np.count_nonzero(preds)\n\n for box, pred in zip(bounding_boxes, preds):\n if pred==1:\n color = (0, 255, 0)\n text = \"Mask\"\n else:\n color = red\n text = \"No Mask\"\n \n image = cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), color, 2)\n\n if box[1] > (height*sf) - 10:\n image = cv2.putText(image, text, (box[0], box[1]-10), cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 2)\n else:\n image = cv2.putText(image, text, (box[0], box[3]+15), cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)\n\n if bgr:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image, num_wearing_masks"
] |
[
[
"torch.Tensor",
"numpy.rint",
"numpy.append",
"numpy.argmax",
"numpy.count_nonzero",
"torch.cuda.is_available"
]
] |
ZHUI/Paddle
|
[
"32ae8e81322ed380a89157fcb632c229e2c64979"
] |
[
"python/paddle/incubate/hapi/tests/test_dataset_cifar.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport os\nimport numpy as np\nimport tempfile\nimport shutil\nimport cv2\n\nfrom paddle.incubate.hapi.datasets import *\nfrom paddle.incubate.hapi.datasets.utils import _check_exists_and_download\n\n\nclass TestCifar10Train(unittest.TestCase):\n def test_main(self):\n cifar = Cifar10(mode='train')\n self.assertTrue(len(cifar) == 50000)\n\n # traversal whole dataset may cost a\n # long time, randomly check 1 sample\n idx = np.random.randint(0, 50000)\n data, label = cifar[idx]\n self.assertTrue(len(data.shape) == 1)\n self.assertTrue(data.shape[0] == 3072)\n self.assertTrue(0 <= int(label) <= 9)\n\n\nclass TestCifar10Test(unittest.TestCase):\n def test_main(self):\n cifar = Cifar10(mode='test')\n self.assertTrue(len(cifar) == 10000)\n\n # traversal whole dataset may cost a\n # long time, randomly check 1 sample\n idx = np.random.randint(0, 10000)\n data, label = cifar[idx]\n self.assertTrue(len(data.shape) == 1)\n self.assertTrue(data.shape[0] == 3072)\n self.assertTrue(0 <= int(label) <= 9)\n\n\nclass TestCifar100Train(unittest.TestCase):\n def test_main(self):\n cifar = Cifar100(mode='train')\n self.assertTrue(len(cifar) == 50000)\n\n # traversal whole dataset may cost a\n # long time, randomly check 1 sample\n idx = np.random.randint(0, 50000)\n data, label = cifar[idx]\n self.assertTrue(len(data.shape) == 1)\n self.assertTrue(data.shape[0] == 3072)\n self.assertTrue(0 <= int(label) <= 99)\n\n\nclass TestCifar100Test(unittest.TestCase):\n def test_main(self):\n cifar = Cifar100(mode='test')\n self.assertTrue(len(cifar) == 10000)\n\n # traversal whole dataset may cost a\n # long time, randomly check 1 sample\n idx = np.random.randint(0, 10000)\n data, label = cifar[idx]\n self.assertTrue(len(data.shape) == 1)\n self.assertTrue(data.shape[0] == 3072)\n self.assertTrue(0 <= int(label) <= 99)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.random.randint"
]
] |
MagdalenaMl/pykeen
|
[
"611daed766a6763c30026e5d21c4ba7647f4fcda"
] |
[
"src/pykeen/triples/triples_factory.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"Implementation of basic instance factory which creates just instances based on standard KG triples.\"\"\"\n\nimport dataclasses\nimport itertools\nimport logging\nimport pathlib\nimport re\nfrom typing import Any, Callable, Collection, Dict, List, Mapping, Optional, Sequence, Set, TextIO, Type, Union, cast\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom .instances import Instances, LCWAInstances, SLCWAInstances\nfrom .splitting import split\nfrom .utils import get_entities, get_relations, load_triples\nfrom ..typing import EntityMapping, LabeledTriples, MappedTriples, RelationMapping, TorchRandomHint\nfrom ..utils import compact_mapping, format_relative_comparison, invert_mapping, torch_is_in_1d\n\n__all__ = [\n 'CoreTriplesFactory',\n 'TriplesFactory',\n 'create_entity_mapping',\n 'create_relation_mapping',\n 'INVERSE_SUFFIX',\n 'cat_triples',\n 'splits_steps',\n 'splits_similarity',\n]\n\nlogger = logging.getLogger(__name__)\n\nINVERSE_SUFFIX = '_inverse'\nTRIPLES_DF_COLUMNS = ('head_id', 'head_label', 'relation_id', 'relation_label', 'tail_id', 'tail_label')\n\n\ndef create_entity_mapping(triples: LabeledTriples) -> EntityMapping:\n \"\"\"Create mapping from entity labels to IDs.\n\n :param triples: shape: (n, 3), dtype: str\n \"\"\"\n # Split triples\n heads, tails = triples[:, 0], triples[:, 2]\n # Sorting ensures consistent results when the triples are permuted\n entity_labels = sorted(set(heads).union(tails))\n # Create mapping\n return {\n str(label): i\n for (i, label) in enumerate(entity_labels)\n }\n\n\ndef create_relation_mapping(relations: set) -> RelationMapping:\n \"\"\"Create mapping from relation labels to IDs.\n\n :param relations: set\n \"\"\"\n # Sorting ensures consistent results when the triples are permuted\n relation_labels = sorted(\n set(relations),\n key=lambda x: (re.sub(f'{INVERSE_SUFFIX}$', '', x), x.endswith(f'{INVERSE_SUFFIX}')),\n )\n # Create mapping\n return {\n str(label): i\n for (i, label) in enumerate(relation_labels)\n }\n\n\ndef _map_triples_elements_to_ids(\n triples: LabeledTriples,\n entity_to_id: EntityMapping,\n relation_to_id: RelationMapping,\n) -> MappedTriples:\n \"\"\"Map entities and relations to pre-defined ids.\"\"\"\n if triples.size == 0:\n logger.warning('Provided empty triples to map.')\n return torch.empty(0, 3, dtype=torch.long)\n\n # When triples that don't exist are trying to be mapped, they get the id \"-1\"\n entity_getter = np.vectorize(entity_to_id.get)\n head_column = entity_getter(triples[:, 0:1], [-1])\n tail_column = entity_getter(triples[:, 2:3], [-1])\n relation_getter = np.vectorize(relation_to_id.get)\n relation_column = relation_getter(triples[:, 1:2], [-1])\n\n # Filter all non-existent triples\n head_filter = head_column < 0\n relation_filter = relation_column < 0\n tail_filter = tail_column < 0\n num_no_head = head_filter.sum()\n num_no_relation = relation_filter.sum()\n num_no_tail = tail_filter.sum()\n\n if (num_no_head > 0) or (num_no_relation > 0) or (num_no_tail > 0):\n logger.warning(\n f\"You're trying to map triples with {num_no_head + num_no_tail} entities and {num_no_relation} relations\"\n f\" that are not in the training set. 
These triples will be excluded from the mapping.\",\n )\n non_mappable_triples = (head_filter | relation_filter | tail_filter)\n head_column = head_column[~non_mappable_triples, None]\n relation_column = relation_column[~non_mappable_triples, None]\n tail_column = tail_column[~non_mappable_triples, None]\n logger.warning(\n f\"In total {non_mappable_triples.sum():.0f} from {triples.shape[0]:.0f} triples were filtered out\",\n )\n\n triples_of_ids = np.concatenate([head_column, relation_column, tail_column], axis=1)\n\n triples_of_ids = np.array(triples_of_ids, dtype=np.int64)\n # Note: Unique changes the order of the triples\n # Note: Using unique means implicit balancing of training samples\n unique_mapped_triples = np.unique(ar=triples_of_ids, axis=0)\n return torch.tensor(unique_mapped_triples, dtype=torch.long)\n\n\ndef _get_triple_mask(\n ids: Collection[int],\n triples: MappedTriples,\n columns: Union[int, Collection[int]],\n invert: bool = False,\n max_id: Optional[int] = None,\n) -> torch.BoolTensor:\n # normalize input\n triples = triples[:, columns]\n if isinstance(columns, int):\n columns = [columns]\n mask = torch_is_in_1d(\n query_tensor=triples,\n test_tensor=ids,\n max_id=max_id,\n invert=invert,\n )\n if len(columns) > 1:\n mask = mask.all(dim=-1)\n return mask\n\n\ndef _ensure_ids(\n labels_or_ids: Union[Collection[int], Collection[str]],\n label_to_id: Mapping[str, int],\n) -> Collection[int]:\n \"\"\"Convert labels to IDs.\"\"\"\n return [\n label_to_id[l_or_i] if isinstance(l_or_i, str) else l_or_i\n for l_or_i in labels_or_ids\n ]\n\n\[email protected]\nclass Labeling:\n \"\"\"A mapping between labels and IDs.\"\"\"\n\n #: The mapping from labels to IDs.\n label_to_id: Mapping[str, int]\n\n #: The inverse mapping for label_to_id; initialized automatically\n id_to_label: Mapping[int, str] = dataclasses.field(init=False)\n\n #: A vectorized version of entity_label_to_id; initialized automatically\n _vectorized_mapper: Callable[..., np.ndarray] = dataclasses.field(init=False)\n\n #: A vectorized version of entity_id_to_label; initialized automatically\n _vectorized_labeler: Callable[..., np.ndarray] = dataclasses.field(init=False)\n\n def __post_init__(self):\n \"\"\"Precompute inverse mappings.\"\"\"\n self.id_to_label = invert_mapping(mapping=self.label_to_id)\n self._vectorized_mapper = np.vectorize(self.label_to_id.get)\n self._vectorized_labeler = np.vectorize(self.id_to_label.get)\n\n def label(\n self,\n ids: Union[int, Sequence[int], np.ndarray, torch.LongTensor],\n unknown_label: str = \"unknown\",\n ) -> np.ndarray:\n \"\"\"Convert IDs to labels.\"\"\"\n # Normalize input\n if isinstance(ids, torch.Tensor):\n ids = ids.cpu().numpy()\n if isinstance(ids, int):\n ids = [ids]\n ids = np.asanyarray(ids)\n # label\n return self._vectorized_labeler(ids, (unknown_label,))\n\n\[email protected]\nclass CoreTriplesFactory:\n \"\"\"Create instances from ID-based triples.\"\"\"\n\n def __init__(\n self,\n mapped_triples: MappedTriples,\n num_entities: int,\n num_relations: int,\n entity_ids: Collection[int],\n relation_ids: Collection[int],\n create_inverse_triples: bool = False,\n metadata: Optional[Mapping[str, Any]] = None,\n ):\n \"\"\"\n Create the triples factory.\n\n :param mapped_triples: shape: (n, 3)\n A three-column matrix where each row are the head identifier, relation identifier, then tail identifier.\n :param num_entities:\n The number of entities.\n :param num_relations:\n The number of relations.\n :param create_inverse_triples:\n Whether to create inverse 
triples.\n :param metadata:\n Arbitrary metadata to go with the graph\n \"\"\"\n super().__init__()\n self.mapped_triples = mapped_triples\n self._num_entities = num_entities\n self._num_relations = num_relations\n self.entity_ids = entity_ids\n self.relation_ids = relation_ids\n self.create_inverse_triples = create_inverse_triples\n if metadata is None:\n metadata = dict()\n self.metadata = metadata\n\n @classmethod\n def create(\n cls,\n mapped_triples: MappedTriples,\n num_entities: Optional[int] = None,\n num_relations: Optional[int] = None,\n entity_ids: Collection[int] = None,\n relation_ids: Collection[int] = None,\n create_inverse_triples: bool = False,\n metadata: Optional[Mapping[str, Any]] = None,\n ) -> \"CoreTriplesFactory\":\n \"\"\"\n Create a triples factory without any label information.\n\n :param mapped_triples: shape: (n, 3)\n The ID-based triples.\n :param num_entities:\n The number of entities. If not given, inferred from mapped_triples.\n :param num_relations:\n The number of relations. If not given, inferred from mapped_triples.\n :param create_inverse_triples:\n Whether to create inverse triples.\n :param metadata:\n Additional metadata to store in the factory.\n\n :return:\n A new triples factory.\n \"\"\"\n if num_entities is None:\n num_entities = mapped_triples[:, [0, 2]].max().item() + 1\n if num_relations is None:\n num_relations = mapped_triples[:, 1].max().item() + 1\n if entity_ids is None:\n entity_ids = get_entities(mapped_triples)\n if relation_ids is None:\n relation_ids = get_relations(mapped_triples)\n return CoreTriplesFactory(\n mapped_triples=mapped_triples,\n num_entities=num_entities,\n num_relations=num_relations,\n entity_ids=entity_ids,\n relation_ids=relation_ids,\n create_inverse_triples=create_inverse_triples,\n metadata=metadata,\n )\n\n @property\n def num_entities(self) -> int: # noqa: D401\n \"\"\"The number of unique entities.\"\"\"\n return self._num_entities\n\n @property\n def num_relations(self) -> int: # noqa: D401\n \"\"\"The number of unique relations.\"\"\"\n if self.create_inverse_triples:\n return 2 * self.real_num_relations\n return self.real_num_relations\n\n @property\n def real_num_relations(self) -> int: # noqa: D401\n \"\"\"The number of relations without inverse relations.\"\"\"\n return self._num_relations\n\n @property\n def num_triples(self) -> int: # noqa: D401\n \"\"\"The number of triples.\"\"\"\n return self.mapped_triples.shape[0]\n\n def extra_repr(self) -> str:\n \"\"\"Extra representation string.\"\"\"\n d = [\n ('num_entities', self.num_entities),\n ('num_relations', self.num_relations),\n ('num_triples', self.num_triples),\n ('inverse_triples', self.create_inverse_triples),\n ]\n d.extend(sorted(self.metadata.items())) # type: ignore\n return ', '.join(\n f'{k}=\"{v}\"' if isinstance(v, (str, pathlib.Path)) else f'{k}={v}'\n for k, v in d\n )\n\n def __repr__(self): # noqa: D105\n return f'{self.__class__.__name__}({self.extra_repr()})'\n\n def with_labels(\n self,\n entity_to_id: Mapping[str, int],\n relation_to_id: Mapping[str, int],\n ) -> \"TriplesFactory\":\n \"\"\"Add labeling to the TriplesFactory.\"\"\"\n # check new label to ID mappings\n for name, columns, new_labeling in (\n (\"entity\", [0, 2], entity_to_id),\n (\"relation\", 1, relation_to_id),\n ):\n existing_ids = set(self.mapped_triples[:, columns].unique().tolist())\n if not existing_ids.issubset(new_labeling.values()):\n diff = existing_ids.difference(new_labeling.values())\n raise ValueError(f\"Some existing IDs do not occur in the new 
{name} labeling: {diff}\")\n return TriplesFactory(\n mapped_triples=self.mapped_triples,\n entity_to_id=entity_to_id,\n relation_to_id=relation_to_id,\n create_inverse_triples=self.create_inverse_triples,\n metadata=self.metadata,\n )\n\n def get_inverse_relation_id(self, relation: int) -> int:\n \"\"\"Get the inverse relation identifier for the given relation.\"\"\"\n if not self.create_inverse_triples:\n raise ValueError('Can not get inverse triple, they have not been created.')\n return self._get_inverse_relation_id(relation)\n\n @staticmethod\n def _get_inverse_relation_id(relation_id: Union[int, torch.LongTensor]) -> Union[int, torch.LongTensor]:\n return relation_id + 1\n\n def _add_inverse_triples_if_necessary(self, mapped_triples: MappedTriples) -> MappedTriples:\n \"\"\"Add inverse triples if they shall be created.\"\"\"\n if not self.create_inverse_triples:\n return mapped_triples\n\n logger.info(\"Creating inverse triples.\")\n h, r, t = mapped_triples.t()\n r = 2 * r\n return torch.cat([\n torch.stack([h, r, t], dim=-1),\n torch.stack([t, self._get_inverse_relation_id(r), h], dim=-1),\n ])\n\n def create_slcwa_instances(self) -> Instances:\n \"\"\"Create sLCWA instances for this factory's triples.\"\"\"\n return self._create_instances(SLCWAInstances)\n\n def create_lcwa_instances(self, use_tqdm: Optional[bool] = None) -> Instances:\n \"\"\"Create LCWA instances for this factory's triples.\"\"\"\n return self._create_instances(LCWAInstances)\n\n def _create_instances(self, instances_cls: Type[Instances]) -> Instances:\n return instances_cls.from_triples(\n mapped_triples=self._add_inverse_triples_if_necessary(mapped_triples=self.mapped_triples),\n num_entities=self.num_entities,\n )\n\n def get_most_frequent_relations(self, n: Union[int, float]) -> Set[int]:\n \"\"\"Get the IDs of the n most frequent relations.\n\n :param n:\n Either the (integer) number of top relations to keep or the (float) percentage of top relationships to keep.\n \"\"\"\n logger.info(f'applying cutoff of {n} to {self}')\n if isinstance(n, float):\n assert 0 < n < 1\n n = int(self.num_relations * n)\n elif not isinstance(n, int):\n raise TypeError('n must be either an integer or a float')\n\n uniq, counts = self.mapped_triples[:, 1].unique(return_counts=True)\n top_counts, top_ids = counts.topk(k=n, largest=True)\n return set(uniq[top_ids].tolist())\n\n def clone_and_exchange_triples(\n self,\n mapped_triples: MappedTriples,\n extra_metadata: Optional[Dict[str, Any]] = None,\n keep_metadata: bool = True,\n create_inverse_triples: Optional[bool] = None,\n ) -> \"CoreTriplesFactory\":\n \"\"\"\n Create a new triples factory sharing everything except the triples.\n\n .. note ::\n We use shallow copies.\n\n :param mapped_triples:\n The new mapped triples.\n :param extra_metadata:\n Extra metadata to include in the new triples factory. If ``keep_metadata`` is true,\n the dictionaries will be unioned with precedence taken on keys from ``extra_metadata``.\n :param keep_metadata:\n Pass the current factory's metadata to the new triples factory\n :param create_inverse_triples:\n Change inverse triple creation flag. 
If None, use flag from this factory.\n\n :return:\n The new factory.\n \"\"\"\n if create_inverse_triples is None:\n create_inverse_triples = self.create_inverse_triples\n return CoreTriplesFactory(\n mapped_triples=mapped_triples,\n num_entities=self.num_entities,\n num_relations=self.real_num_relations,\n entity_ids=self.entity_ids,\n relation_ids=self.relation_ids,\n create_inverse_triples=create_inverse_triples,\n metadata={\n **(extra_metadata or {}),\n **(self.metadata if keep_metadata else {}), # type: ignore\n },\n )\n\n def split(\n self,\n ratios: Union[float, Sequence[float]] = 0.8,\n *,\n random_state: TorchRandomHint = None,\n randomize_cleanup: bool = False,\n method: Optional[str] = None,\n ) -> List['CoreTriplesFactory']:\n \"\"\"Split a triples factory into a train/test.\n\n :param ratios:\n There are three options for this argument:\n\n 1. A float can be given between 0 and 1.0, non-inclusive. The first set of triples will\n get this ratio and the second will get the rest.\n 2. A list of ratios can be given for which set in which order should get what ratios as in\n ``[0.8, 0.1]``. The final ratio can be omitted because that can be calculated.\n 3. All ratios can be explicitly set in order such as in ``[0.8, 0.1, 0.1]``\n where the sum of all ratios is 1.0.\n :param random_state:\n The random state used to shuffle and split the triples.\n :param randomize_cleanup:\n If true, uses the non-deterministic method for moving triples to the training set. This has the\n advantage that it does not necessarily have to move all of them, but it might be significantly\n slower since it moves one triple at a time.\n :param method:\n The name of the method to use, from SPLIT_METHODS. Defaults to \"coverage\".\n\n :return:\n A partition of triples, which are split (approximately) according to the ratios, stored TriplesFactory's\n which share everything else with this root triples factory.\n\n .. 
code-block:: python\n\n ratio = 0.8 # makes a [0.8, 0.2] split\n training_factory, testing_factory = factory.split(ratio)\n\n ratios = [0.8, 0.1] # makes a [0.8, 0.1, 0.1] split\n training_factory, testing_factory, validation_factory = factory.split(ratios)\n\n ratios = [0.8, 0.1, 0.1] # also makes a [0.8, 0.1, 0.1] split\n training_factory, testing_factory, validation_factory = factory.split(ratios)\n \"\"\"\n # Make new triples factories for each group\n return [\n self.clone_and_exchange_triples(\n mapped_triples=triples,\n # do not explicitly create inverse triples for testing; this is handled by the evaluation code\n create_inverse_triples=None if i == 0 else False,\n )\n for i, triples in enumerate(split(\n mapped_triples=self.mapped_triples,\n ratios=ratios,\n random_state=random_state,\n randomize_cleanup=randomize_cleanup,\n method=method,\n ))\n ]\n\n def get_mask_for_entities(\n self,\n entities: Union[Collection[int]],\n invert: bool = False,\n ) -> torch.BoolTensor:\n \"\"\"Get a boolean mask for triples with the given entities.\"\"\"\n return _get_triple_mask(\n ids=entities,\n triples=self.mapped_triples,\n columns=(0, 2), # head and entity need to fulfil the requirement\n invert=invert,\n max_id=self.num_entities,\n )\n\n def get_mask_for_relations(\n self,\n relations: Collection[int],\n invert: bool = False,\n ) -> torch.BoolTensor:\n \"\"\"Get a boolean mask for triples with the given relations.\"\"\"\n return _get_triple_mask(\n ids=relations,\n triples=self.mapped_triples,\n columns=1,\n invert=invert,\n max_id=self.num_relations,\n )\n\n def tensor_to_df(\n self,\n tensor: torch.LongTensor,\n **kwargs: Union[torch.Tensor, np.ndarray, Sequence],\n ) -> pd.DataFrame:\n \"\"\"Take a tensor of triples and make a pandas dataframe with labels.\n\n :param tensor: shape: (n, 3)\n The triples, ID-based and in format (head_id, relation_id, tail_id).\n :param kwargs:\n Any additional number of columns. Each column needs to be of shape (n,). Reserved column names:\n {\"head_id\", \"head_label\", \"relation_id\", \"relation_label\", \"tail_id\", \"tail_label\"}.\n :return:\n A dataframe with n rows, and 6 + len(kwargs) columns.\n \"\"\"\n # Input validation\n additional_columns = set(kwargs.keys())\n forbidden = additional_columns.intersection(TRIPLES_DF_COLUMNS)\n if len(forbidden) > 0:\n raise ValueError(\n f'The key-words for additional arguments must not be in {TRIPLES_DF_COLUMNS}, but {forbidden} were '\n f'used.',\n )\n\n # convert to numpy\n tensor = tensor.cpu().numpy()\n data = dict(zip(['head_id', 'relation_id', 'tail_id'], tensor.T))\n\n # Additional columns\n for key, values in kwargs.items():\n # convert PyTorch tensors to numpy\n if isinstance(values, torch.Tensor):\n values = values.cpu().numpy()\n data[key] = values\n\n # convert to dataframe\n rv = pd.DataFrame(data=data)\n\n # Re-order columns\n columns = list(TRIPLES_DF_COLUMNS[::2]) + sorted(set(rv.columns).difference(TRIPLES_DF_COLUMNS))\n return rv.loc[:, columns]\n\n def new_with_restriction(\n self,\n entities: Union[None, Collection[int], Collection[str]] = None,\n relations: Union[None, Collection[int], Collection[str]] = None,\n invert_entity_selection: bool = False,\n invert_relation_selection: bool = False,\n ) -> 'CoreTriplesFactory':\n \"\"\"Make a new triples factory only keeping the given entities and relations, but keeping the ID mapping.\n\n :param entities:\n The entities of interest. If None, defaults to all entities.\n :param relations:\n The relations of interest. 
If None, defaults to all relations.\n :param invert_entity_selection:\n Whether to invert the entity selection, i.e. select those triples without the provided entities.\n :param invert_relation_selection:\n Whether to invert the relation selection, i.e. select those triples without the provided relations.\n\n :return:\n A new triples factory, which has only a subset of the triples containing the entities and relations of\n interest. The label-to-ID mapping is *not* modified.\n \"\"\"\n keep_mask = None\n\n extra_metadata = {}\n # Filter for entities\n if entities is not None:\n if any(isinstance(e, str) for e in entities):\n raise ValueError(f\"{self.__class__} does not support label-based restriction.\")\n entities = cast(Collection[int], entities)\n extra_metadata['entity_restriction'] = entities\n keep_mask = self.get_mask_for_entities(entities=entities, invert=invert_entity_selection)\n remaining_entities = self.num_entities - len(entities) if invert_entity_selection else len(entities)\n logger.info(f\"keeping {format_relative_comparison(remaining_entities, self.num_entities)} entities.\")\n\n # Filter for relations\n if relations is not None:\n if any(isinstance(r, str) for r in relations):\n raise ValueError(f\"{self.__class__} does not support label-based restriction.\")\n relations = cast(Collection[int], relations)\n extra_metadata['relation_restriction'] = relations\n relation_mask = self.get_mask_for_relations(relations=relations, invert=invert_relation_selection)\n remaining_relations = self.num_relations - len(relations) if invert_entity_selection else len(relations)\n logger.info(f\"keeping {format_relative_comparison(remaining_relations, self.num_relations)} relations.\")\n keep_mask = relation_mask if keep_mask is None else keep_mask & relation_mask\n\n # No filtering happened\n if keep_mask is None:\n return self\n\n num_triples = keep_mask.sum()\n logger.info(f\"keeping {format_relative_comparison(num_triples, self.num_triples)} triples.\")\n return self.clone_and_exchange_triples(\n mapped_triples=self.mapped_triples[keep_mask],\n extra_metadata=extra_metadata,\n )\n\n\nclass TriplesFactory(CoreTriplesFactory):\n \"\"\"Create instances given the path to triples.\"\"\"\n\n def __init__(\n self,\n mapped_triples: MappedTriples,\n entity_to_id: EntityMapping,\n relation_to_id: RelationMapping,\n create_inverse_triples: bool = False,\n metadata: Optional[Mapping[str, Any]] = None,\n ):\n \"\"\"\n Create the triples factory.\n\n :param mapped_triples: shape: (n, 3)\n A three-column matrix where each row are the head identifier, relation identifier, then tail identifier.\n :param entity_to_id:\n The mapping from entities' labels to their indices.\n :param relation_to_id:\n The mapping from relations' labels to their indices.\n :param create_inverse_triples:\n Whether to create inverse triples.\n :param metadata:\n Arbitrary metadata to go with the graph\n \"\"\"\n super().__init__(\n mapped_triples=mapped_triples,\n num_entities=len(entity_to_id),\n num_relations=len(relation_to_id),\n entity_ids=sorted(entity_to_id.values()),\n relation_ids=sorted(relation_to_id.values()),\n create_inverse_triples=create_inverse_triples,\n metadata=metadata,\n )\n self.entity_labeling = Labeling(label_to_id=entity_to_id)\n self.relation_labeling = Labeling(label_to_id=relation_to_id)\n\n @classmethod\n def from_labeled_triples(\n cls,\n triples: LabeledTriples,\n create_inverse_triples: bool = False,\n entity_to_id: Optional[EntityMapping] = None,\n relation_to_id: Optional[RelationMapping] = 
None,\n compact_id: bool = True,\n filter_out_candidate_inverse_relations: bool = True,\n metadata: Optional[Dict[str, Any]] = None,\n ) -> 'TriplesFactory':\n \"\"\"\n Create a new triples factory from label-based triples.\n\n :param triples: shape: (n, 3), dtype: str\n The label-based triples.\n :param create_inverse_triples:\n Whether to create inverse triples.\n :param entity_to_id:\n The mapping from entity labels to ID. If None, create a new one from the triples.\n :param relation_to_id:\n The mapping from relations labels to ID. If None, create a new one from the triples.\n :param compact_id:\n Whether to compact IDs such that the IDs are consecutive.\n :param filter_out_candidate_inverse_relations:\n Whether to remove triples with relations with the inverse suffix.\n :param metadata:\n Arbitrary key/value pairs to store as metadata\n\n :return:\n A new triples factory.\n \"\"\"\n # Check if the triples are inverted already\n # We re-create them pure index based to ensure that _all_ inverse triples are present and that they are\n # contained if and only if create_inverse_triples is True.\n if filter_out_candidate_inverse_relations:\n unique_relations, inverse = np.unique(triples[:, 1], return_inverse=True)\n suspected_to_be_inverse_relations = {r for r in unique_relations if r.endswith(INVERSE_SUFFIX)}\n if len(suspected_to_be_inverse_relations) > 0:\n logger.warning(\n f'Some triples already have the inverse relation suffix {INVERSE_SUFFIX}. '\n f'Re-creating inverse triples to ensure consistency. You may disable this behaviour by passing '\n f'filter_out_candidate_inverse_relations=False',\n )\n relation_ids_to_remove = [\n i\n for i, r in enumerate(unique_relations.tolist())\n if r in suspected_to_be_inverse_relations\n ]\n mask = np.isin(element=inverse, test_elements=relation_ids_to_remove, invert=True)\n logger.info(f\"keeping {mask.sum() / mask.shape[0]} triples.\")\n triples = triples[mask]\n\n # Generate entity mapping if necessary\n if entity_to_id is None:\n entity_to_id = create_entity_mapping(triples=triples)\n if compact_id:\n entity_to_id = compact_mapping(mapping=entity_to_id)[0]\n\n # Generate relation mapping if necessary\n if relation_to_id is None:\n relation_to_id = create_relation_mapping(triples[:, 1])\n if compact_id:\n relation_to_id = compact_mapping(mapping=relation_to_id)[0]\n\n # Map triples of labels to triples of IDs.\n mapped_triples = _map_triples_elements_to_ids(\n triples=triples,\n entity_to_id=entity_to_id,\n relation_to_id=relation_to_id,\n )\n\n return cls(\n entity_to_id=entity_to_id,\n relation_to_id=relation_to_id,\n mapped_triples=mapped_triples,\n create_inverse_triples=create_inverse_triples,\n metadata=metadata,\n )\n\n @classmethod\n def from_path(\n cls,\n path: Union[str, pathlib.Path, TextIO],\n create_inverse_triples: bool = False,\n entity_to_id: Optional[EntityMapping] = None,\n relation_to_id: Optional[RelationMapping] = None,\n compact_id: bool = True,\n metadata: Optional[Dict[str, Any]] = None,\n load_triples_kwargs: Optional[Mapping[str, Any]] = None,\n ) -> 'TriplesFactory':\n \"\"\"\n Create a new triples factory from triples stored in a file.\n\n :param path:\n The path where the label-based triples are stored.\n :param create_inverse_triples:\n Whether to create inverse triples.\n :param entity_to_id:\n The mapping from entity labels to ID. If None, create a new one from the triples.\n :param relation_to_id:\n The mapping from relations labels to ID. 
If None, create a new one from the triples.\n :param compact_id:\n Whether to compact IDs such that the IDs are consecutive.\n :param metadata:\n Arbitrary key/value pairs to store as metadata with the triples factory. Do not\n include ``path`` as a key because it is automatically taken from the ``path``\n kwarg to this function.\n :param load_triples_kwargs: Optional keyword arguments to pass to :func:`load_triples`.\n Could include the ``delimiter`` or a ``column_remapping``.\n\n :return:\n A new triples factory.\n \"\"\"\n path = normalize_path(path)\n\n # TODO: Check if lazy evaluation would make sense\n triples = load_triples(path, **(load_triples_kwargs or {}))\n\n return cls.from_labeled_triples(\n triples=triples,\n create_inverse_triples=create_inverse_triples,\n entity_to_id=entity_to_id,\n relation_to_id=relation_to_id,\n compact_id=compact_id,\n metadata={\n 'path': path,\n **(metadata or {}),\n },\n )\n\n def clone_and_exchange_triples(\n self,\n mapped_triples: MappedTriples,\n extra_metadata: Optional[Dict[str, Any]] = None,\n keep_metadata: bool = True,\n create_inverse_triples: Optional[bool] = None,\n ) -> \"TriplesFactory\": # noqa: D102\n if create_inverse_triples is None:\n create_inverse_triples = self.create_inverse_triples\n return TriplesFactory(\n entity_to_id=self.entity_to_id,\n relation_to_id=self.relation_to_id,\n mapped_triples=mapped_triples,\n create_inverse_triples=create_inverse_triples,\n metadata={\n **(extra_metadata or {}),\n **(self.metadata if keep_metadata else {}), # type: ignore\n },\n )\n\n @property\n def entity_to_id(self) -> Mapping[str, int]:\n \"\"\"Return the mapping from entity labels to IDs.\"\"\"\n return self.entity_labeling.label_to_id\n\n @property\n def entity_id_to_label(self) -> Mapping[int, str]:\n \"\"\"Return the mapping from entity IDs to labels.\"\"\"\n return self.entity_labeling.id_to_label\n\n @property\n def relation_to_id(self) -> Mapping[str, int]:\n \"\"\"Return the mapping from relations labels to IDs.\"\"\"\n return self.relation_labeling.label_to_id\n\n @property\n def relation_id_to_label(self) -> Mapping[int, str]:\n \"\"\"Return the mapping from relations IDs to labels.\"\"\"\n return self.relation_labeling.id_to_label\n\n @property\n def triples(self) -> np.ndarray: # noqa: D401\n \"\"\"The labeled triples, a 3-column matrix where each row are the head label, relation label, then tail label.\"\"\"\n logger.warning(\"Reconstructing all label-based triples. 
This is expensive and rarely needed.\")\n return self.label_triples(self.mapped_triples)\n\n def get_inverse_relation_id(self, relation: Union[str, int]) -> int:\n \"\"\"Get the inverse relation identifier for the given relation.\"\"\"\n relation = next(iter(self.relations_to_ids(relations=[relation]))) # type: ignore\n return super().get_inverse_relation_id(relation=relation)\n\n def label_triples(\n self,\n triples: MappedTriples,\n unknown_entity_label: str = \"[UNKNOWN]\",\n unknown_relation_label: Optional[str] = None,\n ) -> LabeledTriples:\n \"\"\"\n Convert ID-based triples to label-based ones.\n\n :param triples:\n The ID-based triples.\n :param unknown_entity_label:\n The label to use for unknown entity IDs.\n :param unknown_relation_label:\n The label to use for unknown relation IDs.\n\n :return:\n The same triples, but labeled.\n \"\"\"\n if len(triples) == 0:\n return np.empty(shape=(0, 3), dtype=str)\n if unknown_relation_label is None:\n unknown_relation_label = unknown_entity_label\n return np.stack([\n labeling.label(ids=column, unknown_label=unknown_label)\n for (labeling, unknown_label), column in zip(\n [\n (self.entity_labeling, unknown_entity_label),\n (self.relation_labeling, unknown_relation_label),\n (self.entity_labeling, unknown_entity_label),\n ],\n triples.t().numpy(),\n )\n ], axis=1)\n\n def entities_to_ids(self, entities: Union[Collection[int], Collection[str]]) -> Collection[int]:\n \"\"\"Normalize entities to IDs.\"\"\"\n return _ensure_ids(labels_or_ids=entities, label_to_id=self.entity_labeling.label_to_id)\n\n def get_mask_for_entities(\n self,\n entities: Union[Collection[int], Collection[str]],\n invert: bool = False,\n ) -> torch.BoolTensor:\n \"\"\"Get a boolean mask for triples with the given entities.\"\"\"\n return super().get_mask_for_entities(entities=self.entities_to_ids(entities=entities))\n\n def relations_to_ids(\n self,\n relations: Union[Collection[int], Collection[str]],\n ) -> Collection[int]:\n \"\"\"Normalize relations to IDs.\"\"\"\n return _ensure_ids(labels_or_ids=relations, label_to_id=self.relation_labeling.label_to_id)\n\n def get_mask_for_relations(\n self,\n relations: Union[Collection[int], Collection[str]],\n invert: bool = False,\n ) -> torch.BoolTensor:\n \"\"\"Get a boolean mask for triples with the given relations.\"\"\"\n return super().get_mask_for_relations(relations=self.relations_to_ids(relations=relations))\n\n def entity_word_cloud(self, top: Optional[int] = None):\n \"\"\"Make a word cloud based on the frequency of occurrence of each entity in a Jupyter notebook.\n\n :param top: The number of top entities to show. Defaults to 100.\n\n .. warning::\n\n This function requires the ``word_cloud`` package. Use ``pip install pykeen[plotting]`` to\n install it automatically, or install it yourself with\n ``pip install git+https://github.com/kavgan/word_cloud.git``.\n \"\"\"\n return self._word_cloud(\n ids=self.mapped_triples[:, [0, 2]],\n id_to_label=self.entity_labeling.id_to_label,\n top=top or 100,\n )\n\n def relation_word_cloud(self, top: Optional[int] = None):\n \"\"\"Make a word cloud based on the frequency of occurrence of each relation in a Jupyter notebook.\n\n :param top: The number of top relations to show. Defaults to 100.\n\n .. warning::\n\n This function requires the ``word_cloud`` package. 
Use ``pip install pykeen[plotting]`` to\n install it automatically, or install it yourself with\n ``pip install git+https://github.com/kavgan/word_cloud.git``.\n \"\"\"\n return self._word_cloud(\n ids=self.mapped_triples[:, 1],\n id_to_label=self.relation_labeling.id_to_label,\n top=top or 100,\n )\n\n def _word_cloud(self, *, ids: torch.LongTensor, id_to_label: Mapping[int, str], top: int):\n try:\n from word_cloud.word_cloud_generator import WordCloud\n except ImportError:\n logger.warning(\n 'Could not import module `word_cloud`. '\n 'Try installing it with `pip install git+https://github.com/kavgan/word_cloud.git`',\n )\n return\n\n # pre-filter to keep only topk\n uniq, counts = ids.view(-1).unique(return_counts=True)\n\n # if top is larger than the number of available options\n top = min(top, uniq.numel())\n top_counts, top_ids = counts.topk(k=top, largest=True)\n\n # generate text\n text = list(itertools.chain(*(\n itertools.repeat(id_to_label[e_id], count)\n for e_id, count in zip(top_ids.tolist(), top_counts.tolist())\n )))\n\n from IPython.core.display import HTML\n word_cloud = WordCloud()\n return HTML(word_cloud.get_embed_code(text=text, topn=top))\n\n def tensor_to_df(\n self,\n tensor: torch.LongTensor,\n **kwargs: Union[torch.Tensor, np.ndarray, Sequence],\n ) -> pd.DataFrame: # noqa: D102\n data = super().tensor_to_df(tensor=tensor, **kwargs)\n old_col = list(data.columns)\n\n # vectorized label lookup\n for column, labeling in dict(\n head=self.entity_labeling,\n relation=self.relation_labeling,\n tail=self.entity_labeling,\n ).items():\n assert labeling is not None\n data[f'{column}_label'] = labeling.label(\n ids=data[f'{column}_id'],\n unknown_label=(\"[unknown_\" + column + \"]\").upper(),\n )\n\n # Re-order columns\n columns = list(TRIPLES_DF_COLUMNS) + old_col[3:]\n return data.loc[:, columns]\n\n def new_with_restriction(\n self,\n entities: Union[None, Collection[int], Collection[str]] = None,\n relations: Union[None, Collection[int], Collection[str]] = None,\n invert_entity_selection: bool = False,\n invert_relation_selection: bool = False,\n ) -> 'TriplesFactory': # noqa: D102\n if entities is None and relations is None:\n return self\n if entities is not None:\n entities = self.entities_to_ids(entities=entities)\n if relations is not None:\n relations = self.relations_to_ids(relations=relations)\n return super().new_with_restriction(\n entities=entities,\n relations=relations,\n invert_entity_selection=invert_entity_selection,\n invert_relation_selection=invert_relation_selection,\n ).with_labels(entity_to_id=self.entity_to_id, relation_to_id=self.relation_to_id)\n\n\ndef cat_triples(*triples_factories: CoreTriplesFactory) -> MappedTriples:\n \"\"\"Concatenate several triples factories.\"\"\"\n return torch.cat([\n factory.mapped_triples\n for factory in triples_factories\n ], dim=0)\n\n\ndef splits_steps(a: Sequence[CoreTriplesFactory], b: Sequence[CoreTriplesFactory]) -> int:\n \"\"\"Compute the number of moves to go from the first sequence of triples factories to the second.\n\n :return: The number of triples present in the training sets in both\n \"\"\"\n if len(a) != len(b):\n raise ValueError('Must have same number of triples factories')\n\n train_1 = _smt(a[0].mapped_triples)\n train_2 = _smt(b[0].mapped_triples)\n\n # FIXME currently the implementation does not consider the non-training (i.e., second-last entries)\n # for the number of steps. 
Consider more interesting way to discuss splits w/ valid\n\n return len(train_1.symmetric_difference(train_2))\n\n\ndef splits_similarity(a: Sequence[CoreTriplesFactory], b: Sequence[CoreTriplesFactory]) -> float:\n \"\"\"Compute the similarity between two datasets' splits.\n\n :return: The number of triples present in the training sets in both\n \"\"\"\n steps = splits_steps(a, b)\n n = sum(tf.num_triples for tf in a)\n return 1 - steps / n\n\n\ndef _smt(x):\n return set(tuple(xx.detach().numpy().tolist()) for xx in x)\n\n\ndef normalize_path(path: Union[str, pathlib.Path, TextIO]) -> pathlib.Path:\n \"\"\"Normalize path.\"\"\"\n if isinstance(path, TextIO):\n return pathlib.Path(path.name).resolve()\n elif isinstance(path, str):\n return pathlib.Path(path).resolve()\n elif isinstance(path, pathlib.Path):\n return path.resolve()\n else:\n raise TypeError(f'path is invalid type: {type(path)}')\n"
] |
[
[
"numpy.isin",
"torch.empty",
"numpy.unique",
"torch.cat",
"pandas.DataFrame",
"torch.tensor",
"numpy.concatenate",
"numpy.vectorize",
"numpy.asanyarray",
"torch.stack",
"numpy.array",
"numpy.empty"
]
] |
patrislav1/streaming-form-data
|
[
"c9a5f5a542712fdc3ef41dd84889af9619f93822"
] |
[
"utils/profile.py"
] |
[
"from argparse import ArgumentParser\nimport cProfile\nfrom functools import wraps\nfrom io import StringIO, BytesIO\nfrom numpy import random\nimport pstats\n\nfrom requests_toolbelt import MultipartEncoder\nfrom streaming_form_data.parser import StreamingFormDataParser\nfrom streaming_form_data.targets import ValueTarget\n\n\ndef c_profile(sort_by='tottime'):\n def decorator(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n profiler = cProfile.Profile()\n profiler.enable()\n\n result = func(*args, **kwargs)\n\n profiler.disable()\n\n stream = StringIO()\n stats = pstats.Stats(profiler, stream=stream).sort_stats(sort_by)\n stats.print_stats(25)\n\n print(stream.getvalue())\n\n return result\n return wrapped\n return decorator\n\n\n# https://zapier.com/engineering/profiling-python-boss/\ntry:\n from line_profiler import LineProfiler\n\n def line_profile(follow=[]):\n def inner(func):\n def profiled_func(*args, **kwargs):\n try:\n profiler = LineProfiler()\n profiler.add_function(func)\n for f in follow:\n profiler.add_function(f)\n profiler.enable_by_count()\n return func(*args, **kwargs)\n finally:\n profiler.print_stats()\n return profiled_func\n return inner\n\nexcept ImportError:\n def line_profile(follow=[]):\n \"Helpful if you accidentally leave in production!\"\n def inner(func):\n def nothing(*args, **kwargs):\n return func(*args, **kwargs)\n return nothing\n return inner\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('-c', '--content-type', type=str, required=True,\n help='Content Type of the input file')\n parser.add_argument('-f', '--filename', type=str, required=False,\n help='File to be uploaded')\n parser.add_argument('--data-size', metavar='SIZE',\n type=int, required=False,\n help='Size of generated data' +\n ' to be used instead of real file')\n return parser.parse_args()\n\n\ndef get_random_bytes(size, seed):\n random.seed(seed)\n return random.bytes(size)\n\n\ndef open_data(args):\n if args.filename is not None:\n return open(args.filename, 'rb')\n if args.data_size is not None:\n return BytesIO(get_random_bytes(args.data_size, 42))\n raise Exception('Not enough arguments passed: ' +\n 'please specify --filename or --data_size argument')\n\n\n@c_profile()\ndef main():\n args = parse_args()\n\n with open_data(args) as fd:\n encoder = MultipartEncoder(fields={\n 'file': ('file', fd, args.content_type)\n })\n\n parser = StreamingFormDataParser(\n headers={'Content-Type': encoder.content_type})\n parser.register('file', ValueTarget())\n\n parser.data_received(encoder.to_string())\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.random.bytes",
"numpy.random.seed"
]
] |
zengxianyu/photometric_optimization
|
[
"f0b20c75bd5692196da6301a07dfe3009d21b474"
] |
[
"wj_fitting.py"
] |
[
"import os, sys\nimport cv2\nimport torch\nimport torchvision\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\nimport datetime\nfrom face_seg_model import BiSeNet\nimport torchvision.transforms as transforms\nfrom renderer import Renderer\nimport util\nfrom PIL import Image\nfrom face_alignment.detection import sfd_detector as detector\nfrom face_alignment.detection import FAN_landmark\nimport matplotlib.pyplot as plt\n\nfrom photometric_fitting import PhotometricFitting\n\ntorch.backends.cudnn.benchmark = True\n\n\nclass WJPhotometricFitting(PhotometricFitting):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def crop_img(self, ori_image, rect):\n l, t, r, b = rect\n center_x = r - (r - l) // 2\n center_y = b - (b - t) // 2\n w = (r - l) * 1.2\n h = (b - t) * 1.2\n crop_size = max(w, h)\n cropped_size = self.config.cropped_size\n if crop_size > cropped_size:\n crop_ly = int(max(0, center_y - crop_size // 2))\n crop_lx = int(max(0, center_x - crop_size // 2))\n crop_ly = int(min(ori_image.shape[0] - crop_size, crop_ly))\n crop_lx = int(min(ori_image.shape[1] - crop_size, crop_lx))\n crop_image = ori_image[crop_ly: int(crop_ly + crop_size), crop_lx: int(crop_lx + crop_size), :]\n else:\n\n crop_ly = int(max(0, center_y - cropped_size // 2))\n crop_lx = int(max(0, center_x - cropped_size // 2))\n crop_ly = int(min(ori_image.shape[0] - cropped_size, crop_ly))\n crop_lx = int(min(ori_image.shape[1] - cropped_size, crop_lx))\n crop_image = ori_image[crop_ly: int(crop_ly + cropped_size), crop_lx: int(crop_lx + cropped_size), :]\n new_rect = [l - crop_lx, t - crop_ly, r - crop_lx, b - crop_ly]\n return crop_image, new_rect\n\n def run(self, img, net, rect_detect, landmark_detect, rect_thresh, save_name, savefolder):\n # The implementation is potentially able to optimize with images(batch_size>1),\n # here we show the example with a single image fitting\n images = []\n landmarks = []\n image_masks = []\n bbox = rect_detect.extract(img, rect_thresh)\n if len(bbox) > 0:\n crop_image, new_bbox = self.crop_img(img, bbox[0])\n #plt.imshow(crop_image)\n #plt.show()\n #pdb.set_trace()\n\n resize_img, landmark = landmark_detect.extract([crop_image, [new_bbox]])\n landmark = landmark[0]\n landmark[:, 0] = landmark[:, 0] / float(resize_img.shape[1]) * 2 - 1\n landmark[:, 1] = landmark[:, 1] / float(resize_img.shape[0]) * 2 - 1\n landmarks.append(torch.from_numpy(landmark)[None, :, :].double().to(self.device))\n\n image = cv2.resize(crop_image, (self.config.cropped_size, self.config.cropped_size)).astype(np.float32) / 255.\n image = image[:, :, [2, 1, 0]].transpose(2, 0, 1)\n images.append(torch.from_numpy(image[None, :, :, :]).double().to(self.device))\n image_mask = face_seg(crop_image, net)\n image_mask = cv2.resize(image_mask, (self.config.cropped_size, self.config.cropped_size))\n image_mask = image_mask[..., None].astype('float32')\n image_mask = image_mask.transpose(2, 0, 1)\n image_mask_bn = np.zeros_like(image_mask)\n image_mask_bn[np.where(image_mask != 0)] = 1.\n image_masks.append(torch.from_numpy(image_mask_bn[None, :, :, :]).double().to(self.device))\n\n images = torch.cat(images, dim=0)\n images = F.interpolate(images, [self.image_size, self.image_size])\n image_masks = torch.cat(image_masks, dim=0)\n image_masks = F.interpolate(image_masks, [self.image_size, self.image_size])\n\n landmarks = torch.cat(landmarks, dim=0)\n util.check_mkdir(savefolder)\n save_name = os.path.join(savefolder, save_name)\n images = images.float()\n 
landmarks = landmarks.float()\n\n # optimize\n single_params = self.optimize(images, landmarks, image_masks, savefolder)\n self.render.save_obj(filename=save_name,\n vertices=torch.from_numpy(single_params['verts'][0]).to(self.device),\n textures=torch.from_numpy(single_params['albedos'][0]).to(self.device)\n )\n np.save(save_name, single_params)\n\n\ndef face_seg(img, net):\n face_area = [1, 2, 3, 4, 5, 6, 10, 11, 12, 13]\n to_tensor = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n pil_image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n resize_pil_image = pil_image.resize((512, 512), Image.BILINEAR)\n tensor_image = to_tensor(resize_pil_image)\n tensor_image = torch.unsqueeze(tensor_image, 0)\n tensor_image = tensor_image.cuda()\n out = net(tensor_image)[0]\n parsing = out.squeeze(0).cpu().detach().numpy().argmax(0)\n vis_parsing_anno = parsing.copy().astype(np.uint8)\n vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1]))\n num_of_class = np.max(vis_parsing_anno)\n\n for pi in range(1, num_of_class + 1):\n if pi in face_area:\n index = np.where(vis_parsing_anno == pi)\n vis_parsing_anno_color[index[0], index[1]] = 1\n\n return vis_parsing_anno_color\n\n\n\n\ndef resize_para(ori_frame):\n w, h, c = ori_frame.shape\n d = max(w, h)\n scale_to = 640 if d >= 1280 else d / 2\n scale_to = max(64, scale_to)\n input_scale = d / scale_to\n w = int(w / input_scale)\n h = int(h / input_scale)\n image_info = [w, h, input_scale]\n return image_info\n\n\ndef draw_train_process(title, iters, loss, label_loss):\n plt.title(title, fontsize=24)\n plt.xlabel(\"iter\", fontsize=20)\n plt.ylabel(\"loss\", fontsize=20)\n plt.plot(iters, loss[0], color='red', label=label_loss[0])\n plt.plot(iters, loss[1], color='green', label=label_loss[1])\n plt.plot(iters, loss[2], color='blue', label=label_loss[2])\n plt.legend()\n plt.grid()\n plt.show()\n\n\nif __name__ == '__main__':\n import pdb\n import importlib\n import torch\n image_path = str(sys.argv[1])\n save_path = str(sys.argv[2])\n device_name = str(sys.argv[3])\n if len(sys.argv)>4:\n model_name = str(sys.argv[4])\n else:\n model_name = 'flame'\n\n model_filename = \"conf.\" + model_name\n modellib = importlib.import_module(model_filename)\n config = modellib.config\n config_append = {\n 'face_seg_model': './model/face_seg.pth',\n 'seg_class': 19,\n 'face_detect_type': \"2D\",\n 'rect_model_path': \"./model/s3fd.pth\",\n 'rect_thresh': 0.5,\n 'landmark_model_path': \"./model/2DFAN4-11f355bf06.pth.tar\",\n }\n for k,v in config_append.items():\n config[k] = v\n\n config = util.dict2obj(config)\n #config.savefolder = \"./test_results/debug\"\n config.savefolder = save_path\n\n save_name = os.path.split(image_path)[1].split(\".\")[0] + '.obj'\n util.check_mkdir(config.savefolder)\n fitting = WJPhotometricFitting(config, device=device_name)\n img = cv2.imread(image_path)\n w_h_scale = resize_para(img)\n\n face_detect = detector.SFDDetector(device_name, config.rect_model_path, w_h_scale)\n face_landmark = FAN_landmark.FANLandmarks(device_name, config.landmark_model_path, config.face_detect_type)\n\n seg_net = BiSeNet(n_classes=config.seg_class)\n seg_net.cuda()\n seg_net.load_state_dict(torch.load(config.face_seg_model))\n seg_net.eval()\n\n fitting.run(img, seg_net, face_detect, face_landmark, config.rect_thresh, save_name, config.savefolder)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"torch.load",
"torch.cat",
"torch.from_numpy",
"torch.unsqueeze",
"numpy.save",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.zeros_like",
"matplotlib.pyplot.grid",
"torch.nn.functional.interpolate",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] |
gordicaleksa/pytorch-naive-video-neural-style-transfer
|
[
"3773483926f9f96534c45dc35251e296185a69e8"
] |
[
"manual_mask_cleaning.py"
] |
[
"\"\"\"\n Automatic segmentation is not always perfect, this scripts helps by providing semi-automatic mask cleaning.\n Final option is using some editing software and editing masks there (high cost).\n\n Usual workflow:\n 1. Copy processed_masks/ into processed_masks_refined/ (as this script is destructive)\n 2. Manually inspect masks and find the range that can be filled/deleted with a rectangular/custom mask\n 3. Tweak the params in top of the main function and run (start in debug mode if you're not sure how it works)\n\"\"\"\n\nimport os\nimport sys\nimport enum\n# Enables this project to see packages from pytorch-nst-feedforward submodule (e.g. utils)\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pytorch-nst-feedforward'))\n\n\nimport cv2 as cv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Using functions from utils package from pytorch-nst-feedforward submodule like load_image\nfrom utils import utils\n\n\nclass Mode(enum.Enum):\n RECTANGULAR = 0,\n CUSTOM_MASK = 1\n\n\nif __name__ == \"__main__\":\n # place your paths here\n mask_root = r'C:\\tmp_data_dir\\YouTube\\CodingProjects\\pytorch-naive-video-nst\\data\\clip_example\\processed_masks_refined'\n clear_mask_path = r\"C:\\tmp_data_dir\\YouTube\\CodingProjects\\pytorch-naive-video-nst\\data\\clip_example\\custom_mask.png\"\n mode = Mode.RECTANGULAR\n FIRST_IMAGE_INDEX = 0 # specify the first image in the directory that should be processed\n LAST_IMAGE_INDEX = 100 # and the last one (included)\n should_delete = False # rectangular region and custom mask both fill (make white) the specified pixels\n\n if mode == Mode.CUSTOM_MASK:\n clear_mask = utils.load_image(clear_mask_path)[:, :, 0]\n\n for cnt, img_name in enumerate(os.listdir(mask_root)):\n img_path = os.path.join(mask_root, img_name)\n img = utils.load_image(img_path)[:, :, 0]\n\n # if in correct range\n if FIRST_IMAGE_INDEX <= cnt <= LAST_IMAGE_INDEX:\n # step1: edit image\n if mode == Mode.RECTANGULAR:\n # manually specify rectangular region here\n img[215:, 85:300] = 0. if should_delete else 1.\n elif mode == Mode.CUSTOM_MASK:\n img[clear_mask == 1.] = 0. if should_delete else 1.\n else:\n raise Exception(f'{mode} not supported.')\n # step2: overwrite old image\n cv.imwrite(img_path, np.uint8(img * 255))\n"
] |
[
[
"numpy.uint8"
]
] |
spencerlyon2/Distributions.py
|
[
"7e2a4c810c18e8292fa3c50c2f47347ee2707d58"
] |
[
"distcan/matrix.py"
] |
[
"\"\"\"\nMatrix variate distributions\n\n@author : Spencer Lyon <[email protected]>\n@date : 2015-01-07\n\n\"\"\"\nfrom math import log, sqrt, pi\nimport numpy as np\nimport scipy.stats as st\nimport scipy.linalg as la\nfrom scipy.special import gammaln, digamma\n\n__all__ = [\"Wishart\", \"InverseWishart\"]\n\n\n# imports to match julia\ndef logdet(x):\n return np.linalg.slogdet(x)[1]\n\nlogtwo = log(2.0)\n\n\ndef _unwhiten_cf(cf, x):\n \"\"\"\n Unwhiten a matrix\n\n Parameters\n ----------\n cf : array_like(ndim=2, dtype=float)\n The upper triangular part of the cholesky decomposition of a\n\n x : array_like(ndim=2, dtype=float)\n Not sure, just blindly porting...\n\n Returns\n -------\n updated matrix.\n\n \"\"\"\n return cf.T.dot(x)\n\n\ndef unwhiten(a, x):\n \"\"\"\n Unwhiten a matrix\n\n Parameters\n ----------\n a : array_like(ndim=2, dtype=float)\n Not sure, just blindly porting...\n\n x : array_like(ndim=2, dtype=float)\n Not sure, just blindly porting...\n\n Returns\n -------\n updated matrix.\n\n \"\"\"\n return _unwhiten_cf(la.cholesky(a), x)\n\n\ndef isposdef(X):\n \"Return if matrix is positive definite. Relies on cholesky decomp\"\n try:\n la.cholesky(X) # will raise LinAlgError if not positive def\n return True\n except la.LinAlgError:\n return False\n\n\n# ---------------- #\n# utility routines #\n# ---------------- #\n\n# Multidimensional gamma / partial gamma function\ndef lpgamma(p, a):\n \"\"\"\n Multidimensional gamma / partial gamma function\n\n Parameters\n ----------\n p : int\n something....\n\n a : float\n something....\n\n Returns\n -------\n Multidimensional gamma / partial gamma function\n\n \"\"\"\n res = p * (p - 1.0) / 4.0 * log(pi)\n for ii in range(1, p+1):\n res += gammaln(a + (1.0 - ii) / 2.0)\n\n return res\n\n\nclass WishartIWishartParent(object):\n\n def __init__(self, df, S):\n p = S.shape[0]\n self._p = p\n\n assert df > p - 1, \"df should be greater than S.shape[0] - 1.\"\n\n self.df = df\n self.S = S\n self.c0 = self._c0()\n\n self._S_cf = la.cholesky(S) # the triu part of cholesk decomp\n\n def __str__(self):\n nm, df, S = self._dist_name, self.df, self.S\n return \"%s\\n -df: %i\\n -S:\\n%s\" % (nm, df, S)\n\n def __repr__(self):\n return self.__str__()\n\n def insupport(self, X):\n \"\"\"\n Test if a matrix X is in the support of the distribution.\n Returns true iff the X is positive definite\n\n Parameters\n ----------\n X : array_like (dtype=float, ndim=2)\n A test matrix\n\n Returns\n -------\n ans : bool\n A boolean indicating if the matrix is in the support of the\n distribution\n\n \"\"\"\n return isposdef(X)\n\n def logpdf(self, X):\n if X.ndim == 2: # single point\n return self._logpdf1(X)\n else:\n # make sure we have proper dimensions\n (n, p1, p2) = X.shape\n\n if p1 != p2 or p1 != self._p:\n msg = \"Incorrect dimensions for logpdf a multiple points.\"\n msg += \"Must have dimensions (n, p, p) - n is # of points\"\n raise ValueError(msg)\n\n out = np.empty(n)\n\n for i in range(n):\n out[i] = self._logpdf1(X[i])\n\n def pdf(self, X):\n \"\"\"\n Evaluate the pdf of the distribution at various points\n\n Parameters\n ----------\n X : array_like(dtype=float, ndim=(2,3))\n Where to evaluate the pdf. If 2 dimensional, evaluate at\n single point. 
If X is three dimensional\n\n Returns\n -------\n out : scalar_or_array(dtype=float, ndim=(0,1))\n\n \"\"\"\n return np.exp(self.logpdf(X))\n\n def rand(self, n=1):\n \"\"\"\n Generate random samples from the distribution\n\n Parameters\n ----------\n n : int, optional(default=1)\n The number of samples to generate\n\n Returns\n -------\n out : array_like\n The generated samples\n\n \"\"\"\n if n == 1:\n return self._rand1()\n else:\n out = np.empty((n, self._p, self._p))\n for i in range(n):\n out[i] = self._rand1()\n\n return out\n\n\n# -------------------- #\n# Wishart Distribution #\n# -------------------- #\n\nclass Wishart(WishartIWishartParent):\n \"\"\"\n Wishart distribution\n\n Parameters\n ----------\n df : int\n The degrees of freedom parameter. Must be a positive integer\n\n S : array_like(dtype=float, ndim=2)\n The scale matrix\n\n Notes\n -----\n Follows the wikipedia parameterization.\n\n Translation of the associated file from Distributions.jl\n\n \"\"\"\n def __init__(self, df, S):\n super(Wishart, self).__init__(df, S)\n self._dist_name = \"Wishart Distribution\"\n\n def _c0(self):\n \"the logarithm of normalizing constant in pdf\"\n h_df = self.df / 2\n p, S = self._p, self.S\n\n return h_df * (logdet(S) + p * logtwo) + lpgamma(p, h_df)\n\n def _genA(self):\n \"\"\"\n Generate the matrix A in the Bartlett decomposition\n\n A is a lower triangular matrix, with\n\n A(i, j) ~ sqrt of Chisq(df - i + 1) when i == j\n ~ Normal() when i > j\n \"\"\"\n p, df = self._p, self.df\n A = np.zeros((p, p))\n\n for i in range(p):\n A[i, i] = sqrt(st.chi2.rvs(df - i))\n\n for j in range(p-1):\n for i in range(j+1, p):\n A[i, j] = np.random.randn()\n\n return A\n\n def _rand1(self):\n \"generate a single random sample\"\n Z = _unwhiten_cf(self._S_cf, self._genA())\n return Z.dot(Z.T)\n\n @property\n def mean(self):\n return self.df * self.S\n\n @property\n def mode(self):\n r = self.df - self._p - 1.0\n if r > 0.0:\n return self.S * r\n else:\n raise ValueError(\"mode is only defined when df > p + 1\")\n\n @property\n def meanlogdet(self):\n p, df, S = self._p, self.df, self.S\n v = logdet(S) + p * logtwo\n\n v += digamma(0.5 * (df - (np.arange(p)))).sum()\n\n return v\n\n @property\n def entropy(self):\n p, df, c0 = self._p, self.df, self.c0\n return c0 - 0.5*(df - p - 1) * self.meanlogdet + 0.5*df*p\n\n def _logpdf1(self, X):\n p, df, S, c0 = self._p, self.df, self.S, self.c0\n Xcf = la.cholesky(X)\n\n # multiply logdet by 2 b/c julia does in logdet(::CholFact)\n return 0.5*((df - (p + 1))*2*logdet(Xcf) -\n np.trace(la.solve(S, X))) - c0\n\n\n# -------------- #\n# InverseWishart #\n# -------------- #\n\nclass InverseWishart(WishartIWishartParent):\n \"\"\"\n Inverse Wishart distribution\n\n Parameters\n ----------\n df : int\n The degrees of freedom parameter. 
Must be a positive integer\n\n S : array_like(dtype=float, ndim=2)\n The scale matrix\n\n Notes\n -----\n Follows the wikipedia parameterization.\n\n Translation of the associated file from Distributions.jl.\n\n NOTATION: I changed Psi to S\n\n \"\"\"\n\n def __init__(self, df, S):\n super(InverseWishart, self).__init__(df, S)\n self._dist_name = \"Inverse Wishart Distribution\"\n self._Wishart = Wishart(df, la.inv(S))\n\n def _c0(self):\n \"the logarithm of normalizing constant in pdf\"\n h_df = self.df / 2\n p, S = self._p, self.S\n\n return h_df * (p * logtwo - logdet(S)) + lpgamma(p, h_df)\n\n @property\n def mean(self):\n df, p, S = self.df, self._p, self.S\n r = df - (p + 1)\n if r > 0.0:\n return S * (1.0 / r)\n else:\n raise ValueError(\"mean only defined for df > p + 1\")\n\n @property\n def mode(self):\n S, df, p = self.S, self.df, self._p\n return S / (df + p + 1.0)\n\n def _logpdf1(self, X):\n p, df, S, c0 = self._p, self.df, self.S, self.c0\n Xcf = la.cholesky(X)\n\n # we use the fact: trace(S * inv(X)) = trace(inv(X) * S) = trace(X\\S)\n return -0.5*((df + p + 1)*2*logdet(Xcf) +\n np.trace(la.solve(Xcf, S))) - c0\n\n def _rand1(self):\n return la.inv(self._Wishart._rand1())\n"
] |
[
[
"scipy.stats.chi2.rvs",
"numpy.linalg.slogdet",
"numpy.arange",
"scipy.linalg.solve",
"scipy.linalg.cholesky",
"numpy.random.randn",
"scipy.special.gammaln",
"scipy.linalg.inv",
"numpy.zeros",
"numpy.empty"
]
] |
springrid/udacity-drlnd-p1-navigation
|
[
"3d5bf24b6fe33b1efa249fa190e5299e7cfa5f87"
] |
[
"p1_navigation/dqn_agent.py"
] |
[
"import numpy as np\nimport random\nfrom collections import namedtuple, deque\n\nfrom model import QNetwork\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nBUFFER_SIZE = int(1e5) # replay buffer size\nBATCH_SIZE = 64 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR = 5e-4 # learning rate\nUPDATE_EVERY = 4 # how often to update the network\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass Agent():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n\n def __init__(self, state_size, action_size, seed):\n \"\"\"Initialize an Agent object.\n\n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Q-Network\n self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n\n def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)\n\n def act(self, state, eps=0.):\n \"\"\"Returns actions for given state as per current policy.\n\n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))\n\n def learn(self, experiences, gamma):\n \"\"\"Update value parameters using given batch of experience tuples.\n\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples\n gamma (float): discount factor\n \"\"\"\n states, actions, rewards, next_states, dones = experiences\n\n # Compute Q targets for current states\n # DQN\n Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)\n\n # Double DQN\n actions_max = self.qnetwork_local(next_states).detach().argmax(1).unsqueeze(1)\n Q_targets_next = self.qnetwork_target(next_states).gather(1, actions_max)\n\n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n loss = F.mse_loss(Q_expected, Q_targets)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - 
τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n\nclass ReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, action_size, buffer_size, batch_size, seed):\n \"\"\"Initialize a ReplayBuffer object.\n\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n \"\"\"\n self.action_size = action_size\n self.memory = deque(maxlen=buffer_size)\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n\n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n\n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n experiences = random.sample(self.memory, k=self.batch_size)\n\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n\n return (states, actions, rewards, next_states, dones)\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n"
] |
[
[
"numpy.arange",
"torch.from_numpy",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.vstack"
]
] |
easonyang1996/HistoImgProcess
|
[
"d12c61d7b33d2cc2bb9d2a26aa06ca295faacbc8"
] |
[
"CS-CO/model/cs_co.py"
] |
[
"#!~/anaconda3/bin/python3\n# ******************************************************\n# Author: Pengshuai Yang\n# Last modified: 2021-01-13 17:58\n# Email: [email protected]\n# Filename: cs_co.py\n# Description: \n# cross stain contrastive learning \n# ******************************************************\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .resnet import resnet18, resnet34, resnet50, resnet101, resnet152\nfrom .model_parts import Up, OutConv\nimport numpy as np\nimport copy\nfrom functools import wraps\n\nBACKBONE = {'resnet18': resnet18, 'resnet34': resnet34, \n 'resnet50': resnet50, 'resnet101': resnet101, \n 'resnet152': resnet152}\n\n# helper functions\n\n\ndef singleton(cache_key):\n def inner_fn(fn):\n @wraps(fn)\n def wrapper(self, *args, **kwargs):\n instance = getattr(self, cache_key)\n if instance is not None:\n return instance\n\n instance = fn(self, *args, **kwargs)\n setattr(self, cache_key, instance)\n return instance\n return wrapper\n return inner_fn\n\n'''\ndef get_module_device(module):\n return next(module.parameters()).device\n'''\n\ndef set_requires_grad(model, val):\n for p in model.parameters():\n p.requires_grad = val\n\n\n# exponential moving average\n\nclass EMA():\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_average(self, old, new):\n if old is None:\n return new\n return old * self.beta + (1 - self.beta) * new\n\ndef update_moving_average(ema_updater, ma_model, current_model):\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n old_weight, up_weight = ma_params.data, current_params.data\n ma_params.data = ema_updater.update_average(old_weight, up_weight)\n\n\n###################################### sub module ###################################\n\nclass Encoder(nn.Module):\n def __init__(self, encoder_name, pretrained=False, half_channel=False,\n in_channel=3):\n super(Encoder, self).__init__()\n self.backbone = BACKBONE[encoder_name](pretrained=pretrained,\n half_channel=half_channel,\n in_channel=in_channel)\n\n def forward(self, x):\n x = self.backbone(x)\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, encoder_name, out_channel, half_channel=False,\n bilinear=True, decoder_freeze=False):\n super(Decoder, self).__init__()\n if encoder_name[-2:] in ['18', '34']: \n decoder_channel = np.array([512, 256, 128, 64, 64])\n else:\n decoder_channel = np.array([2048, 1024, 512, 256, 64]) \n\n if half_channel==True:\n decoder_channel = decoder_channel // 2\n\n self.up1 = Up(decoder_channel[0], decoder_channel[1], bilinear=bilinear)\n self.up2 = Up(decoder_channel[1], decoder_channel[2], bilinear=bilinear)\n self.up3 = Up(decoder_channel[2], decoder_channel[3], bilinear=bilinear)\n self.up4 = Up(decoder_channel[3], decoder_channel[4], bilinear=bilinear)\n self.up5 = Up(decoder_channel[4], out_channel, bilinear=bilinear)\n\n if decoder_freeze:\n for p in self.parameters():\n p.requires_grad = False\n\n\n def forward(self, x):\n x = self.up1(x)\n x = self.up2(x)\n x = self.up3(x)\n x = self.up4(x)\n x = self.up5(x)\n return x\n\n\nclass HE_Encoder(nn.Module):\n def __init__(self, encoder_name, in_channel, pretrained=False,\n half_channel=False):\n super(HE_Encoder, self).__init__()\n self.H2E_encoder = Encoder(encoder_name, pretrained=pretrained,\n half_channel=half_channel,\n in_channel=in_channel)\n self.E2H_encoder = Encoder(encoder_name, pretrained=pretrained,\n half_channel=half_channel,\n in_channel=in_channel)\n\n def 
forward(self, h, e):\n h_out = self.H2E_encoder(h)\n e_out = self.E2H_encoder(e)\n return h_out, e_out\n\n\nclass HE_Decoder(nn.Module):\n def __init__(self, encoder_name, in_channel, half_channel=False,\n bilinear=True, decoder_freeze=False):\n super(HE_Decoder, self).__init__()\n self.H2E_decoder = Decoder(encoder_name, out_channel=in_channel,\n half_channel=half_channel,\n bilinear=bilinear, decoder_freeze=decoder_freeze)\n self.E2H_decoder = Decoder(encoder_name, out_channel=in_channel,\n half_channel=half_channel,\n bilinear=bilinear, decoder_freeze=decoder_freeze)\n\n def forward(self, h_out, e_out):\n e_pred = self.H2E_decoder(h_out)\n h_pred = self.E2H_decoder(e_out)\n return e_pred, h_pred\n\n\n# MLP class for projector and predictor\n\nclass MLP(nn.Module):\n def __init__(self, dim, projection_size, hidden_size = 4096):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_size),\n nn.BatchNorm1d(hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size, projection_size)\n )\n\n def forward(self, x):\n return self.net(x)\n\n\n####################################### main module ##################################\n# main class\nclass CS_CO(nn.Module):\n def __init__(self, encoder_name, in_channel, projection_size=256,\n model_type='cs', bilinear=True, half_channel=False,\n decoder_freeze=False, pretrained=False, \n pretrained_recon=None, moving_average_decay=0.99, \n use_momentum=True, return_embedding=False):\n super(CS_CO, self).__init__()\n\n self.model_type = model_type\n self.use_momentum = use_momentum\n self.return_embedding = return_embedding\n \n self.online_he_encoder = HE_Encoder(encoder_name, in_channel, pretrained,\n half_channel) \n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n\n if not return_embedding:\n self.he_decoder = HE_Decoder(encoder_name, in_channel, half_channel,\n bilinear, decoder_freeze) \n\n if pretrained_recon != 'None':\n self._load_encoder_weight(pretrained_recon)\n self._load_decoder_weight(pretrained_recon)\n\n if model_type == 'cs-co':\n mlp_dim = 1024 if encoder_name[-2:] in ['18', '34'] else 2048\n mlp_dim = mlp_dim//2 if half_channel else mlp_dim\n \n self.online_projector = MLP(dim=mlp_dim,\n projection_size=projection_size,\n hidden_size=4096)\n self.online_predictor = MLP(dim=projection_size,\n projection_size=projection_size,\n hidden_size=4096)\n\n self.target_encoder = None\n self.target_projector = None \n self.target_ema_updater = EMA(moving_average_decay)\n\n\n init_h = torch.randn(2, in_channel, 224, 224)\n init_e = torch.randn(2, in_channel, 224, 224)\n init_h_prime = torch.randn(2, in_channel, 224, 224)\n init_e_prime = torch.randn(2, in_channel, 224, 224)\n\n self.forward(init_h, init_e, init_h_prime, init_e_prime)\n\n def _load_encoder_weight(self, weight_path):\n # load encoder weight\n pretrained_dict = torch.load(weight_path)\n pretrained_dict = {k[18:]:v for k,v in pretrained_dict.items() if\n k[:18]=='online_he_encoder.'}\n self.online_he_encoder.load_state_dict(pretrained_dict)\n\n def _load_decoder_weight(self, weight_path):\n # load decoder weight\n pretrained_dict = torch.load(weight_path)\n pretrained_dict = {k[11:]:v for k,v in pretrained_dict.items() if\n k[:11]=='he_decoder.'}\n self.he_decoder.load_state_dict(pretrained_dict)\n\n @singleton('target_encoder')\n def _get_target_encoder(self):\n target_encoder = copy.deepcopy(self.online_he_encoder)\n set_requires_grad(target_encoder, False)\n return target_encoder\n \n @singleton('target_projector')\n def _get_target_project(self):\n 
target_projector = copy.deepcopy(self.online_projector)\n set_requires_grad(target_projector, False)\n return target_projector\n\n def reset_moving_average(self):\n del self.target_encoder\n self.target_encoder = None\n del self.target_projector\n self.target_projector = None\n\n def update_moving_average(self):\n assert self.use_momentum, 'you do not need to update the moving average, since you have turned off momentum for the target encoder'\n assert self.target_encoder is not None, 'target encoder has not been created yet'\n update_moving_average(self.target_ema_updater, self.target_encoder,\n self.online_he_encoder)\n assert self.target_projector is not None, 'target projector has not been created yet'\n update_moving_average(self.target_ema_updater, self.target_projector,\n self.online_projector)\n\n def forward(self, h, e, h_prime=None, e_prime=None):\n online_enco_one_h, online_enco_one_e = self.online_he_encoder(h, e)\n \n if self.return_embedding:\n online_enco_one_h_pool = self.avgpool(online_enco_one_h)\n online_enco_one_e_pool = self.avgpool(online_enco_one_e)\n embedding = torch.cat([online_enco_one_h_pool,\n online_enco_one_e_pool], dim=1)\n embedding = torch.flatten(embedding, 1)\n return embedding\n\n pred_one_e, pred_one_h = self.he_decoder(online_enco_one_h,\n online_enco_one_e)\n\n if self.model_type == 'cs':\n return pred_one_e, pred_one_h \n\n if h_prime!=None:\n online_enco_two_h, online_enco_two_e =self.online_he_encoder(h_prime,\n e_prime)\n\n if self.model_type == 'cs-co':\n # h e \n online_enco_one_h_pool = self.avgpool(online_enco_one_h)\n online_enco_one_e_pool = self.avgpool(online_enco_one_e)\n online_enco_one = torch.cat([online_enco_one_h_pool,\n online_enco_one_e_pool], dim=1)\n online_enco_one = torch.flatten(online_enco_one, 1)\n online_proj_one = self.online_projector(online_enco_one)\n online_pred_one = self.online_predictor(online_proj_one)\n \n # h_prime e_prime \n online_enco_two_h_pool = self.avgpool(online_enco_two_h)\n online_enco_two_e_pool = self.avgpool(online_enco_two_e)\n online_enco_two = torch.cat([online_enco_two_h_pool,\n online_enco_two_e_pool], dim=1)\n online_enco_two = torch.flatten(online_enco_two, 1)\n online_proj_two = self.online_projector(online_enco_two)\n online_pred_two = self.online_predictor(online_proj_two)\n # target branch\n with torch.no_grad():\n target_encoder = self._get_target_encoder() if self.use_momentum else self.online_he_encoder\n target_projector = self._get_target_project() if self.use_momentum else self.online_projector\n \n # h e \n target_enco_one_h, target_enco_one_e = target_encoder(h, e)\n target_enco_one_h_pool = self.avgpool(target_enco_one_h)\n target_enco_one_e_pool = self.avgpool(target_enco_one_e)\n target_enco_one = torch.cat([target_enco_one_h_pool,\n target_enco_one_e_pool], dim=1)\n target_enco_one = torch.flatten(target_enco_one, 1)\n target_proj_one = target_projector(target_enco_one)\n target_proj_one.detach_()\n \n # h_prime e_prime \n target_enco_two_h, target_enco_two_e = target_encoder(h_prime,\n e_prime)\n target_enco_two_h_pool = self.avgpool(target_enco_two_h)\n target_enco_two_e_pool = self.avgpool(target_enco_two_e)\n target_enco_two = torch.cat([target_enco_two_h_pool,\n target_enco_two_e_pool], dim=1)\n target_enco_two = torch.flatten(target_enco_two, 1)\n target_proj_two = target_projector(target_enco_two)\n target_proj_two.detach_()\n \n return online_pred_one, online_pred_two, target_proj_one, target_proj_two, pred_one_e, pred_one_h\n\n\ndef Cs_co(network, in_channel, 
model_type='cs', decoder_freeze=False, \n pretrained_recon=None, moving_average_decay=0.99, \n use_momentum=True, return_embedding=False):\n return CS_CO(encoder_name=network, in_channel=in_channel,\n model_type=model_type, decoder_freeze=decoder_freeze,\n pretrained_recon=pretrained_recon,\n moving_average_decay=moving_average_decay,\n use_momentum=use_momentum, return_embedding=return_embedding)\n\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.load",
"torch.cat",
"torch.randn",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.no_grad",
"torch.flatten",
"torch.nn.ReLU",
"numpy.array"
]
] |
iwamura-lab/my_codes
|
[
"70140fe81b70d7ea4969c442771db40054cc109e"
] |
[
"my_codes/phonon/total_dos.py"
] |
[
"#!/usr/bin/env python\n\nimport argparse\n\nimport numpy as np\nimport phonopy\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--mesh\", type=int, default=20, help=\"mesh number\")\n parser.add_argument(\"-s\", \"--save\", action=\"store_true\", help=\"Save figure mode\")\n args = parser.parse_args()\n\n pa = np.array([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])\n phonon = phonopy.load(\"phonopy_params.yaml\", primitive_matrix=pa)\n # phonon = phonopy.load('phonopy_params.yaml')\n phonon.run_mesh([args.mesh] * 3)\n phonon.run_total_dos()\n # phonon.run_total_dos(freq_min=0, freq_max=8.5, freq_pitch=0.05)\n if args.save:\n phonon.plot_total_dos().savefig(\"mesh_\" + str(args.mesh) + \".png\")\n else:\n phonon.plot_total_dos().show()\n phonon.write_total_dos(filename=\"total_dos\" + str(args.mesh) + \".dat\")\n"
] |
[
[
"numpy.array"
]
] |
IcewineChen/pytorch-pyranet
|
[
"2cb56bbda10b3c90efa5baed87381063cb180420"
] |
[
"utils/debugger.py"
] |
[
"import numpy as np\nimport cv2\nimport ref as ref\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d\nfrom mpl_toolkits.mplot3d import Axes3D\ntry:\n #import mayavi.mlab\n pass\nexcept:\n pass\n \ndef show2D(img, points, c):\n points = ((points.reshape(ref.nJoints, -1))).astype(np.int32)\n for j in range(ref.nJoints):\n cv2.circle(img, (points[j, 0], points[j, 1]), 3, c, -1)\n for e in ref.edges:\n cv2.line(img, (points[e[0], 0], points[e[0], 1]),\n (points[e[1], 0], points[e[1], 1]), c, 2)\n return img\n\nclass Debugger(object):\n def __init__(self):\n self.plt = plt\n self.fig = self.plt.figure()\n self.ax = self.fig.add_subplot((111),projection='3d')\n self.ax.grid(False)\n #self.ax.set_xlabel('x') \n #self.ax.set_ylabel('y') \n #self.ax.set_zlabel('z')\n oo = 1e10\n self.xmax, self.ymax, self.zmax = -oo, -oo, -oo\n self.xmin, self.ymin, self.zmin = oo, oo, oo\n self.imgs = {}\n self.vols = {}\n try:\n self.mayavi = mayavi\n xx = [0, 0, 0, 0, ref.outputRes, ref.outputRes, ref.outputRes, ref.outputRes]\n yy = [0, 0, ref.outputRes, ref.outputRes, 0, 0, ref.outputRes, ref.outputRes]\n zz = [0, ref.outputRes, 0, ref.outputRes, 0, ref.outputRes, 0, ref.outputRes]\n self.mayavi.mlab.points3d(xx, yy, zz,\n mode = \"cube\",\n color = (0, 0, 0),\n opacity = 1,\n scale_factor=1)\n except:\n pass\n \n def addImg(self, img, imgId = 'default'):\n self.imgs[imgId] = img.copy()\n \n def addVol(self, vol, c = (0, 1, 0), threshold = 0.5, volID = 'default'):\n self.vols[volID] = vol.copy()\n zz, yy, xx = np.where(self.vols[volID] > threshold)\n self.mayavi.mlab.points3d(xx, ref.outputRes - yy, zz,\n mode = \"cube\",\n color = c,\n opacity = 0.1,\n scale_factor=1)\n \n def addPoint2D(self, point, c, imgId = 'default'):\n self.imgs[imgId] = show2D(self.imgs[imgId], point, c)\n \n def showImg(self, pause = False, imgId = 'default'):\n cv2.imshow('{}'.format(imgId), self.imgs[imgId])\n if pause:\n cv2.waitKey()\n \n def showVol(self):\n self.mayavi.mlab.show()\n \n def showAllImg(self, pause = False):\n for i, v in self.imgs.items():\n cv2.imshow('{}'.format(i), v)\n if pause:\n cv2.waitKey()\n \n def saveImg(self, imgId = 'default', path = '../debug/'):\n cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])\n \n def saveAllImg(self, path = '../debug/'):\n for i, v in self.imgs.items():\n cv2.imwrite(path + '/{}.png'.format(i), v)\n \n"
] |
[
[
"numpy.where"
]
] |
NeuralNetworkLab/Stream-Fusion-Network
|
[
"6e4232352953f6a6a1fba4ce022cd8a462610215"
] |
[
"tf_model_zoo/models/syntaxnet/syntaxnet/graph_builder.py"
] |
[
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Builds parser models.\"\"\"\n\nimport tensorflow as tf\n\nimport syntaxnet.load_parser_ops\n\nfrom tensorflow.python.ops import control_flow_ops as cf\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\nfrom syntaxnet.ops import gen_parser_ops\n\n\ndef BatchedSparseToDense(sparse_indices, output_size):\n \"\"\"Batch compatible sparse to dense conversion.\n\n This is useful for one-hot coded target labels.\n\n Args:\n sparse_indices: [batch_size] tensor containing one index per batch\n output_size: needed in order to generate the correct dense output\n\n Returns:\n A [batch_size, output_size] dense tensor.\n \"\"\"\n eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))\n return tf.nn.embedding_lookup(eye, sparse_indices)\n\n\ndef EmbeddingLookupFeatures(params, sparse_features, allow_weights):\n \"\"\"Computes embeddings for each entry of sparse features sparse_features.\n\n Args:\n params: list of 2D tensors containing vector embeddings\n sparse_features: 1D tensor of strings. Each entry is a string encoding of\n dist_belief.SparseFeatures, and represents a variable length list of\n feature ids, and optionally, corresponding weights values.\n allow_weights: boolean to control whether the weights returned from the\n SparseFeatures are used to multiply the embeddings.\n\n Returns:\n A tensor representing the combined embeddings for the sparse features.\n For each entry s in sparse_features, the function looks up the embeddings\n for each id and sums them into a single tensor weighing them by the\n weight of each id. It returns a tensor with each entry of sparse_features\n replaced by this combined embedding.\n \"\"\"\n if not isinstance(params, list):\n params = [params]\n # Lookup embeddings.\n sparse_features = tf.convert_to_tensor(sparse_features)\n indices, ids, weights = gen_parser_ops.unpack_sparse_features(sparse_features)\n embeddings = tf.nn.embedding_lookup(params, ids)\n\n if allow_weights:\n # Multiply by weights, reshaping to allow broadcast.\n broadcast_weights_shape = tf.concat(0, [tf.shape(weights), [1]])\n embeddings *= tf.reshape(weights, broadcast_weights_shape)\n\n # Sum embeddings by index.\n return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))\n\n\nclass GreedyParser(object):\n \"\"\"Builds a Chen & Manning style greedy neural net parser.\n\n Builds a graph with an optional reader op connected at one end and\n operations needed to train the network on the other. 
Supports multiple\n network instantiations sharing the same parameters and network topology.\n\n The following named nodes are added to the training and eval networks:\n epochs: a tensor containing the current epoch number\n cost: a tensor containing the current training step cost\n gold_actions: a tensor containing actions from gold decoding\n feature_endpoints: a list of sparse feature vectors\n logits: output of the final layer before computing softmax\n The training network also contains:\n train_op: an op that executes a single training step\n\n Typical usage:\n\n parser = graph_builder.GreedyParser(num_actions, num_features,\n num_feature_ids, embedding_sizes,\n hidden_layer_sizes)\n parser.AddTraining(task_context, batch_size=5)\n with tf.Session('local') as sess:\n # This works because the session uses the same default graph as the\n # GraphBuilder did.\n sess.run(parser.inits.values())\n while True:\n tf_epoch, _ = sess.run([parser.training['epoch'],\n parser.training['train_op']])\n if tf_epoch[0] > 0:\n break\n \"\"\"\n\n def __init__(self,\n num_actions,\n num_features,\n num_feature_ids,\n embedding_sizes,\n hidden_layer_sizes,\n seed=None,\n gate_gradients=False,\n use_locking=False,\n embedding_init=1.0,\n relu_init=1e-4,\n bias_init=0.2,\n softmax_init=1e-4,\n averaging_decay=0.9999,\n use_averaging=True,\n check_parameters=True,\n check_every=1,\n allow_feature_weights=False,\n only_train='',\n arg_prefix=None,\n **unused_kwargs):\n \"\"\"Initialize the graph builder with parameters defining the network.\n\n Args:\n num_actions: int size of the set of parser actions\n num_features: int list of dimensions of the feature vectors\n num_feature_ids: int list of same length as num_features corresponding to\n the sizes of the input feature spaces\n embedding_sizes: int list of same length as num_features of the desired\n embedding layer sizes\n hidden_layer_sizes: int list of desired relu layer sizes; may be empty\n seed: optional random initializer seed to enable reproducibility\n gate_gradients: if True, gradient updates are computed synchronously,\n ensuring consistency and reproducibility\n use_locking: if True, use locking to avoid read-write contention when\n updating Variables\n embedding_init: sets the std dev of normal initializer of embeddings to\n embedding_init / embedding_size ** .5\n relu_init: sets the std dev of normal initializer of relu weights\n to relu_init\n bias_init: sets constant initializer of relu bias to bias_init\n softmax_init: sets the std dev of normal initializer of softmax init\n to softmax_init\n averaging_decay: decay for exponential moving average when computing\n averaged parameters, set to 1 to do vanilla averaging\n use_averaging: whether to use moving averages of parameters during evals\n check_parameters: whether to check for NaN/Inf parameters during\n training\n check_every: checks numerics every check_every steps.\n allow_feature_weights: whether feature weights are allowed.\n only_train: the comma separated set of parameter names to train. 
If empty,\n all model parameters will be trained.\n arg_prefix: prefix for context parameters.\n \"\"\"\n self._num_actions = num_actions\n self._num_features = num_features\n self._num_feature_ids = num_feature_ids\n self._embedding_sizes = embedding_sizes\n self._hidden_layer_sizes = hidden_layer_sizes\n self._seed = seed\n self._gate_gradients = gate_gradients\n self._use_locking = use_locking\n self._use_averaging = use_averaging\n self._check_parameters = check_parameters\n self._check_every = check_every\n self._allow_feature_weights = allow_feature_weights\n self._only_train = set(only_train.split(',')) if only_train else None\n self._feature_size = len(embedding_sizes)\n self._embedding_init = embedding_init\n self._relu_init = relu_init\n self._softmax_init = softmax_init\n self._arg_prefix = arg_prefix\n # Parameters of the network with respect to which training is done.\n self.params = {}\n # Other variables, with respect to which no training is done, but which we\n # nonetheless need to save in order to capture the state of the graph.\n self.variables = {}\n # Operations to initialize any nodes that require initialization.\n self.inits = {}\n # Training- and eval-related nodes.\n self.training = {}\n self.evaluation = {}\n self.saver = None\n # Nodes to compute moving averages of parameters, called every train step.\n self._averaging = {}\n self._averaging_decay = averaging_decay\n # Pretrained embeddings that can be used instead of constant initializers.\n self._pretrained_embeddings = {}\n # After the following 'with' statement, we'll be able to re-enter the\n # 'params' scope by re-using the self._param_scope member variable. See for\n # instance _AddParam.\n with tf.name_scope('params') as self._param_scope:\n self._relu_bias_init = tf.constant_initializer(bias_init)\n\n @property\n def embedding_size(self):\n size = 0\n for i in range(self._feature_size):\n size += self._num_features[i] * self._embedding_sizes[i]\n return size\n\n def _AddParam(self,\n shape,\n dtype,\n name,\n initializer=None,\n return_average=False):\n \"\"\"Add a model parameter w.r.t. we expect to compute gradients.\n\n _AddParam creates both regular parameters (usually for training) and\n averaged nodes (usually for inference). 
It returns one or the other based\n on the 'return_average' arg.\n\n Args:\n shape: int list, tensor shape of the parameter to create\n dtype: tf.DataType, data type of the parameter\n name: string, name of the parameter in the TF graph\n initializer: optional initializer for the paramter\n return_average: if False, return parameter otherwise return moving average\n\n Returns:\n parameter or averaged parameter\n \"\"\"\n if name not in self.params:\n step = tf.cast(self.GetStep(), tf.float32)\n # Put all parameters and their initializing ops in their own scope\n # irrespective of the current scope (training or eval).\n with tf.name_scope(self._param_scope):\n self.params[name] = tf.get_variable(name, shape, dtype, initializer)\n param = self.params[name]\n if initializer is not None:\n self.inits[name] = state_ops.init_variable(param, initializer)\n if self._averaging_decay == 1:\n logging.info('Using vanilla averaging of parameters.')\n ema = tf.train.ExponentialMovingAverage(decay=(step / (step + 1.0)),\n num_updates=None)\n else:\n ema = tf.train.ExponentialMovingAverage(decay=self._averaging_decay,\n num_updates=step)\n self._averaging[name + '_avg_update'] = ema.apply([param])\n self.variables[name + '_avg_var'] = ema.average(param)\n self.inits[name + '_avg_init'] = state_ops.init_variable(\n ema.average(param), tf.zeros_initializer)\n return (self.variables[name + '_avg_var'] if return_average else\n self.params[name])\n\n def GetStep(self):\n def OnesInitializer(shape, dtype=tf.float32, partition_info=None):\n return tf.ones(shape, dtype)\n return self._AddVariable([], tf.int32, 'step', OnesInitializer)\n\n def _AddVariable(self, shape, dtype, name, initializer=None):\n if name in self.variables:\n return self.variables[name]\n self.variables[name] = tf.get_variable(name, shape, dtype, initializer)\n if initializer is not None:\n self.inits[name] = state_ops.init_variable(self.variables[name],\n initializer)\n return self.variables[name]\n\n def _ReluWeightInitializer(self):\n with tf.name_scope(self._param_scope):\n return tf.random_normal_initializer(stddev=self._relu_init,\n seed=self._seed)\n\n def _EmbeddingMatrixInitializer(self, index, embedding_size):\n if index in self._pretrained_embeddings:\n return self._pretrained_embeddings[index]\n else:\n return tf.random_normal_initializer(\n stddev=self._embedding_init / embedding_size**.5,\n seed=self._seed)\n\n def _AddEmbedding(self,\n features,\n num_features,\n num_ids,\n embedding_size,\n index,\n return_average=False):\n \"\"\"Adds an embedding matrix and passes the `features` vector through it.\"\"\"\n embedding_matrix = self._AddParam(\n [num_ids, embedding_size],\n tf.float32,\n 'embedding_matrix_%d' % index,\n self._EmbeddingMatrixInitializer(index, embedding_size),\n return_average=return_average)\n embedding = EmbeddingLookupFeatures(embedding_matrix,\n tf.reshape(features,\n [-1],\n name='feature_%d' % index),\n self._allow_feature_weights)\n return tf.reshape(embedding, [-1, num_features * embedding_size])\n\n def _BuildNetwork(self, feature_endpoints, return_average=False):\n \"\"\"Builds a feed-forward part of the net given features as input.\n\n The network topology is already defined in the constructor, so multiple\n calls to BuildForward build multiple networks whose parameters are all\n shared. 
It is the source of the input features and the use of the output\n that distinguishes each network.\n\n Args:\n feature_endpoints: tensors with input features to the network\n return_average: whether to use moving averages as model parameters\n\n Returns:\n logits: output of the final layer before computing softmax\n \"\"\"\n assert len(feature_endpoints) == self._feature_size\n\n # Create embedding layer.\n embeddings = []\n for i in range(self._feature_size):\n embeddings.append(self._AddEmbedding(feature_endpoints[i],\n self._num_features[i],\n self._num_feature_ids[i],\n self._embedding_sizes[i],\n i,\n return_average=return_average))\n\n last_layer = tf.concat(1, embeddings)\n last_layer_size = self.embedding_size\n\n # Create ReLU layers.\n for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):\n weights = self._AddParam(\n [last_layer_size, hidden_layer_size],\n tf.float32,\n 'weights_%d' % i,\n self._ReluWeightInitializer(),\n return_average=return_average)\n bias = self._AddParam([hidden_layer_size],\n tf.float32,\n 'bias_%d' % i,\n self._relu_bias_init,\n return_average=return_average)\n last_layer = tf.nn.relu_layer(last_layer,\n weights,\n bias,\n name='layer_%d' % i)\n last_layer_size = hidden_layer_size\n\n # Create softmax layer.\n softmax_weight = self._AddParam(\n [last_layer_size, self._num_actions],\n tf.float32,\n 'softmax_weight',\n tf.random_normal_initializer(stddev=self._softmax_init,\n seed=self._seed),\n return_average=return_average)\n softmax_bias = self._AddParam(\n [self._num_actions],\n tf.float32,\n 'softmax_bias',\n tf.zeros_initializer,\n return_average=return_average)\n logits = tf.nn.xw_plus_b(last_layer,\n softmax_weight,\n softmax_bias,\n name='logits')\n return {'logits': logits}\n\n def _AddGoldReader(self, task_context, batch_size, corpus_name):\n features, epochs, gold_actions = (\n gen_parser_ops.gold_parse_reader(task_context,\n self._feature_size,\n batch_size,\n corpus_name=corpus_name,\n arg_prefix=self._arg_prefix))\n return {'gold_actions': tf.identity(gold_actions,\n name='gold_actions'),\n 'epochs': tf.identity(epochs,\n name='epochs'),\n 'feature_endpoints': features}\n\n def _AddDecodedReader(self, task_context, batch_size, transition_scores,\n corpus_name):\n features, epochs, eval_metrics, documents = (\n gen_parser_ops.decoded_parse_reader(transition_scores,\n task_context,\n self._feature_size,\n batch_size,\n corpus_name=corpus_name,\n arg_prefix=self._arg_prefix))\n return {'eval_metrics': eval_metrics,\n 'epochs': tf.identity(epochs,\n name='epochs'),\n 'feature_endpoints': features,\n 'documents': documents}\n\n def _AddCostFunction(self, batch_size, gold_actions, logits):\n \"\"\"Cross entropy plus L2 loss on weights and biases of the hidden layers.\"\"\"\n dense_golden = BatchedSparseToDense(gold_actions, self._num_actions)\n cross_entropy = tf.div(\n tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(\n logits, dense_golden)), batch_size)\n regularized_params = [tf.nn.l2_loss(p)\n for k, p in self.params.items()\n if k.startswith('weights') or k.startswith('bias')]\n l2_loss = 1e-4 * tf.add_n(regularized_params) if regularized_params else 0\n return {'cost': tf.add(cross_entropy, l2_loss, name='cost')}\n\n def AddEvaluation(self,\n task_context,\n batch_size,\n evaluation_max_steps=300,\n corpus_name='documents'):\n \"\"\"Builds the forward network only without the training operation.\n\n Args:\n task_context: file path from which to read the task context.\n batch_size: batch size to request from reader op.\n 
evaluation_max_steps: max number of parsing actions during evaluation,\n only used in beam parsing.\n corpus_name: name of the task input to read parses from.\n\n Returns:\n Dictionary of named eval nodes.\n \"\"\"\n def _AssignTransitionScores():\n return tf.assign(nodes['transition_scores'],\n nodes['logits'], validate_shape=False)\n def _Pass():\n return tf.constant(-1.0)\n unused_evaluation_max_steps = evaluation_max_steps\n with tf.name_scope('evaluation'):\n nodes = self.evaluation\n nodes['transition_scores'] = self._AddVariable(\n [batch_size, self._num_actions], tf.float32, 'transition_scores',\n tf.constant_initializer(-1.0))\n nodes.update(self._AddDecodedReader(task_context, batch_size, nodes[\n 'transition_scores'], corpus_name))\n nodes.update(self._BuildNetwork(nodes['feature_endpoints'],\n return_average=self._use_averaging))\n nodes['eval_metrics'] = cf.with_dependencies(\n [tf.cond(tf.greater(tf.size(nodes['logits']), 0),\n _AssignTransitionScores, _Pass)],\n nodes['eval_metrics'], name='eval_metrics')\n return nodes\n\n def _IncrementCounter(self, counter):\n return state_ops.assign_add(counter, 1, use_locking=True)\n\n def _AddLearningRate(self, initial_learning_rate, decay_steps):\n \"\"\"Returns a learning rate that decays by 0.96 every decay_steps.\n\n Args:\n initial_learning_rate: initial value of the learning rate\n decay_steps: decay by 0.96 every this many steps\n\n Returns:\n learning rate variable.\n \"\"\"\n step = self.GetStep()\n return cf.with_dependencies(\n [self._IncrementCounter(step)],\n tf.train.exponential_decay(initial_learning_rate,\n step,\n decay_steps,\n 0.96,\n staircase=True))\n\n def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):\n \"\"\"Embeddings at the given index will be set to pretrained values.\"\"\"\n\n def _Initializer(shape, dtype=tf.float32, partition_info=None):\n unused_dtype = dtype\n t = gen_parser_ops.word_embedding_initializer(\n vectors=embeddings_path,\n task_context=task_context,\n embedding_init=self._embedding_init)\n\n t.set_shape(shape)\n return t\n\n self._pretrained_embeddings[index] = _Initializer\n\n def AddTraining(self,\n task_context,\n batch_size,\n learning_rate=0.1,\n decay_steps=4000,\n momentum=0.9,\n corpus_name='documents'):\n \"\"\"Builds a trainer to minimize the cross entropy cost function.\n\n Args:\n task_context: file path from which to read the task context\n batch_size: batch size to request from reader op\n learning_rate: initial value of the learning rate\n decay_steps: decay learning rate by 0.96 every this many steps\n momentum: momentum parameter used when training with momentum\n corpus_name: name of the task input to read parses from\n\n Returns:\n Dictionary of named training nodes.\n \"\"\"\n with tf.name_scope('training'):\n nodes = self.training\n nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))\n nodes.update(self._BuildNetwork(nodes['feature_endpoints'],\n return_average=False))\n nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],\n nodes['logits']))\n # Add the optimizer\n if self._only_train:\n trainable_params = [v\n for k, v in self.params.iteritems()\n if k in self._only_train]\n else:\n trainable_params = self.params.values()\n lr = self._AddLearningRate(learning_rate, decay_steps)\n optimizer = tf.train.MomentumOptimizer(lr,\n momentum,\n use_locking=self._use_locking)\n train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)\n for param in trainable_params:\n slot = optimizer.get_slot(param, 
'momentum')\n self.inits[slot.name] = state_ops.init_variable(slot,\n tf.zeros_initializer)\n self.variables[slot.name] = slot\n numerical_checks = [\n tf.check_numerics(param,\n message='Parameter is not finite.')\n for param in trainable_params\n if param.dtype.base_dtype in [tf.float32, tf.float64]\n ]\n check_op = tf.group(*numerical_checks)\n avg_update_op = tf.group(*self._averaging.values())\n train_ops = [train_op]\n if self._check_parameters:\n train_ops.append(check_op)\n if self._use_averaging:\n train_ops.append(avg_update_op)\n nodes['train_op'] = tf.group(*train_ops, name='train_op')\n return nodes\n\n def AddSaver(self, slim_model=False):\n \"\"\"Adds ops to save and restore model parameters.\n\n Args:\n slim_model: whether only averaged variables are saved.\n\n Returns:\n the saver object.\n \"\"\"\n # We have to put the save op in the root scope otherwise running\n # \"save/restore_all\" won't find the \"save/Const\" node it expects.\n with tf.name_scope(None):\n variables_to_save = self.params.copy()\n variables_to_save.update(self.variables)\n if slim_model:\n for key in variables_to_save.keys():\n if not key.endswith('avg_var'):\n del variables_to_save[key]\n self.saver = tf.train.Saver(variables_to_save)\n return self.saver\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.get_variable",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.concat",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.nn.l2_loss",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.group",
"tensorflow.add_n",
"tensorflow.check_numerics",
"tensorflow.train.exponential_decay",
"tensorflow.train.MomentumOptimizer",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.train.Saver",
"tensorflow.random_normal_initializer",
"tensorflow.python.ops.state_ops.init_variable",
"tensorflow.nn.xw_plus_b",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.nn.embedding_lookup",
"tensorflow.size",
"tensorflow.constant",
"tensorflow.nn.relu_layer",
"tensorflow.reshape",
"tensorflow.assign",
"tensorflow.ones",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.constant_initializer"
]
] |
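The graph-builder code in the record above assembles a feed-forward scorer: per-feature embedding lookups are concatenated, passed through ReLU hidden layers, and mapped to action logits. The following is a minimal NumPy sketch of that forward-pass pattern only, not the SyntaxNet implementation itself; every size below (vocabularies, embedding widths, hidden sizes, number of actions) is an illustrative assumption, and each example carries one id per feature group, which simplifies the real reshaped feature tensors.

import numpy as np

rng = np.random.default_rng(0)

# Illustrative sizes (assumptions, not the parser's real configuration).
num_feature_ids = [50, 30]        # vocabulary size per feature group
embedding_sizes = [8, 4]          # embedding width per feature group
hidden_layer_sizes = [16]         # ReLU layer sizes
num_actions = 3                   # width of the final softmax layer

# Parameters created once, mirroring the _AddEmbedding / _AddParam calls.
embedding_tables = [rng.normal(size=(v, d))
                    for v, d in zip(num_feature_ids, embedding_sizes)]
hidden_params = []
in_size = sum(embedding_sizes)
for out_size in hidden_layer_sizes:
    hidden_params.append((rng.normal(scale=0.1, size=(in_size, out_size)),
                          np.full(out_size, 0.2)))
    in_size = out_size
softmax_w = rng.normal(scale=0.01, size=(in_size, num_actions))
softmax_b = np.zeros(num_actions)

def build_network(feature_endpoints):
    """feature_endpoints: one 1-D integer id array per feature group."""
    # Embedding lookup and concatenation (the tf.concat(1, embeddings) step).
    embeddings = [table[ids] for table, ids in zip(embedding_tables, feature_endpoints)]
    last_layer = np.concatenate(embeddings, axis=1)
    # ReLU hidden layers (the tf.nn.relu_layer stack).
    for w, b in hidden_params:
        last_layer = np.maximum(last_layer @ w + b, 0.0)
    # Logits returned before the softmax (the tf.nn.xw_plus_b step).
    return last_layer @ softmax_w + softmax_b

batch = [rng.integers(0, v, size=4) for v in num_feature_ids]
logits = build_network(batch)      # shape (4, num_actions)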
ylavinia/pulearning632
|
[
"b4ed5e721aee85c9aa8375bf817e64237f6b298d"
] |
[
"project/detect_modality.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport seaborn as sns\nfrom sklearn import *\nimport glob, sys, os\nfrom pathlib import Path\nfrom src.utils import get_proj_root\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef find_peaks(df, col_name_str, min_peak_fraction):\n \"\"\"\n Find peak points and indices via KDE\n \n :param df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n \n :return a list containing the peak indices, the density value of the peaks, KDE x-values, KDE y-values\n \"\"\"\n from scipy.signal import find_peaks, peak_prominences\n plt.figure()\n \n # get the x-values and y-values of the kde plot\n density_x, density_y = sns.kdeplot(df[col_name_str]).get_lines()[0].get_data()\n # want to find peaks that are greater than min_peak_fraction of max peak \n # to exclude peaks that are too small\n min_peak = min_peak_fraction * max(density_y)\n # find peaks with density more than min_peak\n peaks, _ = find_peaks(x=density_y, prominence=min_peak)\n prominences = peak_prominences(density_y, peaks)[0]\n x_peaks = [density_x[idx] for idx in peaks]\n return peaks, x_peaks, density_x, density_y\n\n\ndef find_peaks_no_header(df, col_num, min_peak_fraction):\n \"\"\"\n Find peak points and indices via KDE\n \n :param df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n \n :return a list containing the peak indices, the density value of the peaks, KDE x-values, KDE y-values\n \"\"\"\n from scipy.signal import find_peaks, peak_prominences\n plt.figure()\n # get the x-values and y-values of the kde plot\n density_x, density_y = sns.kdeplot(df.iloc[col_num]).get_lines()[0].get_data() \n # want to find peaks that are greater than min_peak_fraction of max peak \n # to exclude peaks that are too small\n min_peak = min_peak_fraction * max(density_y)\n # find peaks with density more than min_peak\n peaks, _ = find_peaks(x=density_y, prominence=min_peak)\n prominences = peak_prominences(density_y, peaks)[0]\n x_peaks = [density_x[idx] for idx in peaks]\n return peaks, x_peaks, density_x, density_y\n\n\n\ndef find_lowest_points_between_peaks_no_header(df, col_num, min_peak_fraction=0.1):\n \"\"\"\n Search for the lowest points between peaks, store the points in a list\n \n :param: df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n \n :param min_peak_fraction: float between 0.0 and 1.0 to specify peak size to be considered a peak\n\n :return a list containing the lowest points\n \"\"\"\n # find the peaks\n peaks, x_peaks, density_x, density_y = find_peaks_no_header(df, col_num, min_peak_fraction)\n valley_indices = []\n valley_points = []\n for i in range(len(peaks)-1):\n peak = peaks[i]\n next_peak = peaks[i+1]\n valley_idx = peak + np.argmin(density_y[peak:next_peak+1])\n valley_indices.append(valley_idx)\n valley_points.append(density_x[valley_idx])\n \n # plot the density\n plt.plot(density_x, density_y)\n # plot the peaks\n plt.plot(x_peaks, density_y[peaks], \"x\")\n # plot the lowest point between the peaks\n plt.plot(valley_points, density_y[valley_indices], \"o\", c=\"red\")\n # plot a vertical line for the peaks\n plt.vlines(x=x_peaks, ymin=0, ymax=density_y[peaks])\n # plot a vertical line for the lowest point\n plt.vlines(x=valley_points, ymin=0, ymax=density_y[valley_indices])\n plt.title(\"Density Plot with Peaks and Lowest Point Between Peaks\")\n plt.ylabel(\"Density\")\n 
plt.xlabel(\"RTT (ms)\")\n \n return valley_points\n\n\n\ndef find_lowest_points_between_peaks(df, col_name_str, min_peak_fraction=0.1):\n \"\"\"\n Search for the lowest points between peaks, store the points in a list\n \n :param: df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n \n :param min_peak_fraction: float between 0.0 and 1.0 to specify peak size to be considered a peak\n\n :return a list containing the lowest points\n \"\"\"\n # find the peaks\n peaks, x_peaks, density_x, density_y = find_peaks(df, col_name_str, min_peak_fraction)\n valley_indices = []\n valley_points = []\n for i in range(len(peaks)-1):\n peak = peaks[i]\n next_peak = peaks[i+1]\n valley_idx = peak + np.argmin(density_y[peak:next_peak+1])\n valley_indices.append(valley_idx)\n valley_points.append(density_x[valley_idx])\n \n # plot the density\n plt.plot(density_x, density_y)\n # plot the peaks\n plt.plot(x_peaks, density_y[peaks], \"x\")\n # plot the lowest point between the peaks\n plt.plot(valley_points, density_y[valley_indices], \"o\", c=\"red\")\n # plot a vertical line for the peaks\n plt.vlines(x=x_peaks, ymin=0, ymax=density_y[peaks])\n # plot a vertical line for the lowest point\n plt.vlines(x=valley_points, ymin=0, ymax=density_y[valley_indices])\n plt.title(\"Density Plot with Peaks and Lowest Point Between Peaks\")\n plt.ylabel(\"Density\")\n plt.xlabel(\"RTT (ms)\")\n \n return valley_points \n\ndef split_2_peaks(df, col_name_str, valley_points):\n '''\n In case of bimodal data, split data into 2 subsets with the valley point as the separator\n\n :param: df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n\n :param valley_points: A list containing the lowest point between peaks on the x-axis\n\n :return List of two DataFrames separated by the valley_points\n '''\n df_1 = df[df[col_name_str] < valley_points[0]]\n df_2 = df[df[col_name_str] >= valley_points[0]]\n return [df_1, df_2]\n\n\n\n\ndef split_2_peaks_no_header(df, col_num, valley_points):\n '''\n In case of bimodal data, split data into 2 subsets with the valley point as the separator\n\n :param: df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n\n :param valley_points: A list containing the lowest point between peaks on the x-axis\n\n :return List of two DataFrames separated by the valley_points\n '''\n df_1 = df[df.iloc[col_num] < valley_points[0]]\n df_2 = df[df.iloc[col_num] >= valley_points[0]]\n return [df_1, df_2] \n\n\ndef divide_data_based_on_peaks(df, col_name_str, valley_points):\n \"\"\"\n Split data at the lowest points to create unimodal subsets\n \n :param: df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n\n :param valley_points: A list containing the lowest point on the x-axis\n \n :return DataFrame of the subsets\n \"\"\"\n subset_list = []\n num_valleys = len(valley_points)\n \n num_peaks = num_valleys + 1\n\n # if data has no valley, thus it is unimodal\n if num_valleys == 0:\n subset_list.append(df)\n # if data has 1 valley and 2 peaks (bimodal)\n elif num_valleys == 1:\n two_subsets_list = split_2_peaks(df, col_name_str=col_name_str, valley_points=valley_points)\n subset_list.extend(two_subsets_list)\n # if there are at least 2 valleys (at least 3 peaks)\n else:\n # the first subset is always the data less than the first valley point\n df_first = df[df[col_name_str] < valley_points[0]]\n 
subset_list.append(df_first)\n \n i = 0\n # remember num_valleys is at least 2\n while (i < num_valleys-1):\n df_subset = df[(df[col_name_str] >= valley_points[i]) & (df[col_name_str] < valley_points[i+1])]\n subset_list.append(df_subset)\n i += 1\n\n # the last subset is always the data greater than the last valley point\n df_last = df[df[col_name_str] >= valley_points[-1]] \n subset_list.append(df_last) \n\n return subset_list\n\n\n\ndef divide_data_based_on_peaks_no_header(df, col_num, valley_points):\n \"\"\"\n Split data at the lowest points to create unimodal subsets\n \n :param: df: DataFrame containing the desired column\n\n :param col_name_str: The desired column name in string\n\n :param valley_points: A list containing the lowest point on the x-axis\n \n :return DataFrame of the subsets\n \"\"\"\n subset_list = []\n num_valleys = len(valley_points)\n \n num_peaks = num_valleys + 1\n\n # if data has no valley, thus it is unimodal\n if num_valleys == 0:\n subset_list.append(df)\n # if data has 1 valley and 2 peaks (bimodal)\n elif num_valleys == 1:\n two_subsets_list = split_2_peaks_no_header(df, col_num=col_num, valley_points=valley_points)\n subset_list.extend(two_subsets_list)\n # if there are at least 2 valleys (at least 3 peaks)\n else:\n # the first subset is always the data less than the first valley point\n df_first = df[df.iloc[col_num] < valley_points[0]]\n subset_list.append(df_first)\n \n i = 0\n # remember num_valleys is at least 2\n while (i < num_valleys-1):\n df_subset = df[(df.iloc[col_num] >= valley_points[i]) & (df.iloc[col_num] < valley_points[i+1])]\n subset_list.append(df_subset)\n i += 1\n\n # the last subset is always the data greater than the last valley point\n df_last = df[df.iloc[col_num] >= valley_points[-1]] \n subset_list.append(df_last) \n\n return subset_list\n\n\n\n"
] |
[
[
"scipy.signal.find_peaks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"scipy.signal.peak_prominences",
"numpy.argmin",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.figure"
]
] |
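The detect_modality.py record above estimates a density curve, keeps peaks whose prominence exceeds a fraction of the tallest peak, and splits the data at the lowest density point between neighbouring peaks. Below is a self-contained sketch of that idea using scipy.stats.gaussian_kde in place of seaborn's kdeplot so no figure is required; the bimodal sample, the variable name rtt, and the 0.1 prominence fraction are all illustrative assumptions.

import numpy as np
from scipy.signal import find_peaks
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
# A made-up bimodal sample standing in for the RTT column.
rtt = np.concatenate([rng.normal(20, 2, 500), rng.normal(60, 5, 500)])

grid = np.linspace(rtt.min(), rtt.max(), 512)
density = gaussian_kde(rtt)(grid)

# Keep only peaks whose prominence is at least 10% of the tallest peak.
peaks, _ = find_peaks(density, prominence=0.1 * density.max())

# Lowest density point between each pair of neighbouring peaks = candidate split.
valleys = []
for left, right in zip(peaks[:-1], peaks[1:]):
    valley_idx = left + np.argmin(density[left:right + 1])
    valleys.append(grid[valley_idx])

# With exactly one valley the data is treated as bimodal and split in two.
if len(valleys) == 1:
    low_mode = rtt[rtt < valleys[0]]
    high_mode = rtt[rtt >= valleys[0]]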
UKPLab/coling2018-graph-neural-networks-question-answering
|
[
"389558d6570195debea570834944507de4f21d65"
] |
[
"questionanswering/models/lexical_baselines.py"
] |
[
"import torch\nfrom torch import nn as nn\n\nfrom questionanswering.models import modules\nfrom questionanswering.models.modules import batchmv_cosine_similarity\n\n\nclass OneEdgeModel(nn.Module):\n\n def __init__(self,\n tokens_encoder=None,\n **kwargs\n ):\n super(OneEdgeModel, self).__init__()\n if tokens_encoder is None:\n tokens_encoder = modules.ConvWordsEncoder(**kwargs)\n self._tokens_encoder: nn.Module = tokens_encoder\n\n def forward(self, questions_m, graphs_m):\n\n question_vector1 = self._tokens_encoder(questions_m)\n\n edge_vectors1 = self._tokens_encoder(graphs_m.view(-1, graphs_m.size(-1)))\n edge_vectors1 = edge_vectors1.view(-1, graphs_m.size(1), edge_vectors1.size(-1))\n\n # Batch cosine similarity\n predictions = batchmv_cosine_similarity(edge_vectors1, question_vector1)\n\n return predictions\n\n\nclass STAGGModel(nn.Module):\n\n def __init__(self,\n tokens_encoder=None,\n **kwargs\n ):\n super(STAGGModel, self).__init__()\n if tokens_encoder is None:\n tokens_encoder = modules.ConvWordsEncoder(**kwargs)\n self._tokens_encoder: nn.Module = tokens_encoder\n self._weights_layer = nn.Sequential(nn.Linear(in_features=9,\n out_features=1),\n nn.ReLU()\n )\n\n def forward(self, questions_m, graphs_m, graphs_features_m):\n graphs_features_m = graphs_features_m.float()\n\n question_vector1 = self._tokens_encoder(questions_m[..., 0, :])\n question_vector2 = self._tokens_encoder(questions_m[..., 1, :])\n\n edge_vectors1 = self._tokens_encoder(graphs_m[..., 0, :]\n .contiguous()\n .view(-1, graphs_m.size(-1)))\n edge_vectors2 = self._tokens_encoder(graphs_m[..., 1, :]\n .contiguous()\n .view(-1, graphs_m.size(-1)))\n edge_vectors1 = edge_vectors1.view(-1, graphs_m.size(1), edge_vectors1.size(-1))\n edge_vectors2 = edge_vectors2.view(-1, graphs_m.size(1), edge_vectors2.size(-1))\n\n # Batch cosine similarity\n predictions1 = batchmv_cosine_similarity(edge_vectors1, question_vector1)\n predictions2 = batchmv_cosine_similarity(edge_vectors2, question_vector2)\n\n graphs_features_m = torch.cat((predictions1.unsqueeze(2), predictions2.unsqueeze(2), graphs_features_m), dim=-1)\n predictions = self._weights_layer(graphs_features_m).squeeze(-1)\n\n return predictions\n\n\nclass PooledEdgesModel(nn.Module):\n\n def __init__(self,\n tokens_encoder=None,\n **kwargs\n ):\n super(PooledEdgesModel, self).__init__()\n if tokens_encoder is None:\n tokens_encoder = modules.ConvWordsEncoder(**kwargs)\n self._tokens_encoder: nn.Module = tokens_encoder\n self._pool = self._tokens_encoder._pool\n\n def forward(self, questions_m, graphs_m, *args):\n question_vector = self._tokens_encoder(questions_m)\n edge_vectors = graphs_m.view(-1, graphs_m.size(-1))\n\n edge_vectors = self._tokens_encoder(edge_vectors)\n edge_vectors = edge_vectors.view(-1, graphs_m.size(-2), edge_vectors.size(-1))\\\n .transpose(-1, -2).contiguous()\n edge_vectors = self._pool(edge_vectors).squeeze(-1)\n edge_vectors = edge_vectors.view(-1, graphs_m.size(1), edge_vectors.size(-1))\n\n # Batch cosine similarity\n predictions = batchmv_cosine_similarity(edge_vectors, question_vector)\n\n return predictions\n\n\n\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.ReLU"
]
] |
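The lexical baseline models in the record above all score candidate graphs by comparing an encoded question vector against encoded edge vectors with a batched cosine similarity. The sketch below shows only that scoring step, substituting torch.nn.functional.cosine_similarity for the project's batchmv_cosine_similarity helper and stubbing the token encoders with random tensors; the batch size, edge count, and dimensionality are assumptions for illustration.

import torch
import torch.nn.functional as F

batch, num_edges, dim = 2, 5, 16           # illustrative sizes
question_vectors = torch.randn(batch, dim)             # encoder output per question
edge_vectors = torch.randn(batch, num_edges, dim)      # encoder output per candidate edge

# Unsqueeze the question so it broadcasts against every edge of its own candidate set.
scores = F.cosine_similarity(edge_vectors, question_vectors.unsqueeze(1), dim=-1)
print(scores.shape)                        # torch.Size([2, 5]) — one score per candidate

# Pick the best-scoring candidate graph for each question.
best = scores.argmax(dim=-1)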
ali4413/Ali-Mehrabifard
|
[
"0b319934299f42460b789a0627f553181d95468a"
] |
[
"exercises/solution_01_05.py"
] |
[
"# Import pandas \nimport pandas as pd\n\n# Read in the dataset \nhockey_players = pd.read_csv('data/canucks.csv', index_col=0)\n\n\n# Display the dataframe\nhockey_players.head()"
] |
[
[
"pandas.read_csv"
]
] |
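The exercise solution above reads data/canucks.csv, which is not part of this record. As a standalone variant of the same pattern, the sketch below feeds an equivalent in-memory CSV through pandas.read_csv with index_col=0 and then calls head(); the column names and rows are invented purely so the snippet runs without the original file.

import io
import pandas as pd

csv_text = """Player,Position,Points
Player A,C,66
Player B,D,53
"""

hockey_players = pd.read_csv(io.StringIO(csv_text), index_col=0)
hockey_players.head()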
aelwan/pyemu
|
[
"482cedeb637982e6bd5fc45babd9c95922d90dd2"
] |
[
"pyemu/pst/pst_handler.py"
] |
[
"\nfrom __future__ import print_function, division\nimport os\nimport re\nimport copy\nimport warnings\nimport numpy as np\nimport pandas as pd\npd.options.display.max_colwidth = 100\nimport pyemu\nfrom ..pyemu_warnings import PyemuWarning\nfrom pyemu.pst.pst_controldata import ControlData, SvdData, RegData\nfrom pyemu.pst import pst_utils\nfrom pyemu.plot import plot_utils\n#from pyemu.utils.os_utils import run\n\nclass Pst(object):\n \"\"\"All things PEST(++) control file\n\n Args:\n filename (`str`): the name of the control file\n load (`bool`, optional): flag to load the control file. Default is True\n resfile (`str`, optional): corresponding residual file. If `None`, a residual file\n with the control file base name is sought. Default is `None`\n\n Note:\n This class is the primary mechanism for dealing with PEST control files. Support is provided\n for constructing new control files as well as manipulating existing control files.\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n pst.control_data.noptmax = -1\n pst.write(\"my_new.pst\")\n\n \"\"\"\n def __init__(self, filename, load=True, resfile=None):\n\n self.parameter_data = None\n \"\"\"pandas.DataFrame: '* parameter data' information. Columns are \n standard PEST variable names\n \n Example::\n \n pst.parameter_data.loc[:,\"partrans\"] = \"log\"\n pst.parameter_data.loc[:,\"parubnd\"] = 10.0\n \n \"\"\"\n self.observation_data = None\n \"\"\"pandas.DataFrame: '* observation data' information. Columns are standard PEST\n variable names\n \n Example::\n \n pst.observation_data.loc[:,\"weight\"] = 1.0\n pst.observation_data.loc[:,\"obgnme\"] = \"obs_group\"\n \n \"\"\"\n self.prior_information = None\n \"\"\"pandas.DataFrame: '* prior information' data. Columns are standard PEST\n variable names\"\"\"\n\n self.filename = filename\n self.resfile = resfile\n self.__res = None\n self.__pi_count = 0\n self.with_comments = False\n self.comments = {}\n self.other_sections = {}\n self.new_filename = None\n for key,value in pst_utils.pst_config.items():\n self.__setattr__(key,copy.copy(value))\n #self.tied = None\n self.control_data = ControlData()\n \"\"\"pyemu.pst.pst_controldata.ControlData: '* control data' information. \n Access with standard PEST variable names \n \n Example:: \n \n pst.control_data.noptmax = 2\n pst.control_data.pestmode = \"estimation\"\n \n \n \"\"\"\n self.svd_data = SvdData()\n \"\"\"pyemu.pst.pst_controldata.SvdData: '* singular value decomposition' section information. \n Access with standard PEST variable names\n \n Example::\n \n pst.svd_data.maxsing = 100\n \n \n \"\"\"\n self.reg_data = RegData()\n \"\"\"pyemu.pst.pst_controldata.RegData: '* regularization' section information.\n Access with standard PEST variable names.\n \n Example:: \n \n pst.reg_data.phimlim = 1.00 #yeah right!\n\n \n \"\"\"\n self._version = 1\n if load:\n if not os.path.exists(filename):\n raise Exception(\"pst file not found:{0}\".format(filename))\n\n self.load(filename)\n\n def __setattr__(self, key, value):\n if key == \"model_command\":\n if isinstance(value, str):\n value = [value]\n super(Pst,self).__setattr__(key,value)\n\n\n @classmethod\n def from_par_obs_names(cls,par_names=[\"par1\"],obs_names=[\"obs1\"]):\n \"\"\"construct a shell `Pst` instance from parameter and observation names\n\n Args:\n par_names ([`str`]): list of parameter names. Default is [`par1`]\n obs_names ([`str`]): list of observation names. 
Default is [`obs1`]\n\n Note:\n While this method works, it does not make template or instruction files.\n Users are encouraged to use `Pst.from_io_files()` for more usefulness\n\n Example::\n\n par_names = [\"par1\",\"par2\"]\n obs_names = [\"obs1\",\"obs2\"]\n pst = pyemu.Pst.from_par_obs_names(par_names,obs_names)\n\n \"\"\"\n return pst_utils.generic_pst(par_names=par_names,obs_names=obs_names)\n\n @property\n def phi(self):\n \"\"\"get the weighted total objective function.\n\n Returns:\n `float`: sum of squared residuals\n\n Note:\n Requires `Pst.res` (the residuals file) to be available\n\n \"\"\"\n psum = 0.0\n for _, contrib in self.phi_components.items():\n psum += contrib\n return psum\n\n @property\n def phi_components(self):\n \"\"\" get the individual components of the total objective function\n\n Returns:\n `dict`: dictionary of observation group, contribution to total phi\n\n\n Note:\n Requires `Pst.res` (the residuals file) to be available\n\n \"\"\"\n\n # calculate phi components for each obs group\n components = {}\n ogroups = self.observation_data.groupby(\"obgnme\").groups\n rgroups = self.res.groupby(\"group\").groups\n self.res.index = self.res.name\n for og,onames in ogroups.items():\n #assert og in rgroups.keys(),\"Pst.phi_componentw obs group \" +\\\n # \"not found: \" + str(og)\n #og_res_df = self.res.ix[rgroups[og]]\n og_res_df = self.res.loc[onames,:].dropna(axis=1)\n #og_res_df.index = og_res_df.name\n og_df = self.observation_data.loc[ogroups[og],:]\n og_df.index = og_df.obsnme\n #og_res_df = og_res_df.loc[og_df.index,:]\n assert og_df.shape[0] == og_res_df.shape[0],\\\n \" Pst.phi_components error: group residual dataframe row length\" +\\\n \"doesn't match observation data group dataframe row length\" + \\\n str(og_df.shape) + \" vs. \" + str(og_res_df.shape) + \",\" + og\n if \"modelled\" not in og_res_df.columns:\n print(og_res_df)\n m = self.res.loc[onames,\"modelled\"]\n print(m.loc[m.isna()])\n raise Exception(\"'modelled' not in res df columns for group \"+og)\n # components[og] = np.sum((og_res_df[\"residual\"] *\n # og_df[\"weight\"]) ** 2)\n components[og] = np.sum(((og_df.loc[:,\"obsval\"] - og_res_df.loc[og_df.obsnme,\"modelled\"]) *\n og_df.loc[:,\"weight\"]) ** 2)\n if not self.control_data.pestmode.startswith(\"reg\") and \\\n self.prior_information.shape[0] > 0:\n ogroups = self.prior_information.groupby(\"obgnme\").groups\n for og in ogroups.keys():\n if og not in rgroups.keys():\n raise Exception(\"Pst.adjust_weights_res() obs group \" +\\\n \"not found: \" + str(og))\n og_res_df = self.res.loc[rgroups[og],:]\n og_res_df.index = og_res_df.name\n og_df = self.prior_information.loc[ogroups[og],:]\n og_df.index = og_df.pilbl\n og_res_df = og_res_df.loc[og_df.index,:]\n if og_df.shape[0] != og_res_df.shape[0]:\n raise Exception(\" Pst.phi_components error: group residual dataframe row length\" +\\\n \"doesn't match observation data group dataframe row length\" + \\\n str(og_df.shape) + \" vs. 
\" + str(og_res_df.shape))\n components[og] = np.sum((og_res_df[\"residual\"] *\n og_df[\"weight\"]) ** 2)\n\n return components\n\n @property\n def phi_components_normalized(self):\n \"\"\" get the individual components of the total objective function\n normalized to the total PHI being 1.0\n\n Returns:\n `dict`: dictionary of observation group,\n normalized contribution to total phi\n\n Note:\n Requires `Pst.res` (the residuals file) to be available\n\n\n \"\"\"\n # use a dictionary comprehension to go through and normalize each component of phi to the total\n phi_components_normalized = {i: self.phi_components[i]/self.phi for i in self.phi_components}\n return phi_components_normalized\n\n def set_res(self,res):\n \"\"\" reset the private `Pst.res` attribute.\n\n Args:\n res : (`pandas.DataFrame` or `str`): something to use as Pst.res attribute.\n If `res` is `str`, a dataframe is read from file `res`\n\n\n \"\"\"\n if isinstance(res,str):\n res = pst_utils.read_resfile(res)\n self.__res = res\n\n @property\n def res(self):\n \"\"\"get the residuals dataframe attribute\n\n Returns:\n `pandas.DataFrame`: a dataframe containing the\n residuals information.\n\n Note:\n if the Pst.__res attribute has not been loaded,\n this call loads the res dataframe from a file\n\n \"\"\"\n if self.__res is not None:\n return self.__res\n else:\n if self.resfile is not None:\n if not os.path.exists(self.resfile):\n raise Exception(\"Pst.res: self.resfile \" +\\\n str(self.resfile) + \" does not exist\")\n else:\n self.resfile = self.filename.replace(\".pst\", \".res\")\n if not os.path.exists(self.resfile):\n self.resfile = self.resfile.replace(\".res\", \".rei\")\n if not os.path.exists(self.resfile):\n self.resfile = self.resfile.replace(\".rei\", \".base.rei\")\n if not os.path.exists(self.resfile):\n if self.new_filename is not None:\n self.resfile = self.new_filename.replace(\".pst\",\".res\")\n if not os.path.exists(self.resfile):\n self.resfile = self.resfile.replace(\".res\",\"rei\")\n if not os.path.exists(self.resfile):\n raise Exception(\"Pst.res: \" +\n \"could not residual file case.res\" +\n \" or case.rei\" + \" or case.base.rei\" +\n \" or case.obs.csv\")\n\n\n res = pst_utils.read_resfile(self.resfile)\n missing_bool = self.observation_data.obsnme.apply\\\n (lambda x: x not in res.name)\n missing = self.observation_data.obsnme[missing_bool]\n if missing.shape[0] > 0:\n raise Exception(\"Pst.res: the following observations \" +\n \"were not found in \" +\n \"{0}:{1}\".format(self.resfile,','.join(missing)))\n self.__res = res\n return self.__res\n\n @property\n def nprior(self):\n \"\"\"number of prior information equations\n\n Returns:\n `int`: the number of prior info equations\n\n \"\"\"\n self.control_data.nprior = self.prior_information.shape[0]\n return self.control_data.nprior\n\n @property\n def nnz_obs(self):\n \"\"\" get the number of non-zero weighted observations\n\n Returns:\n `int`: the number of non-zeros weighted observations\n\n \"\"\"\n nnz = 0\n for w in self.observation_data.weight:\n if w > 0.0:\n nnz += 1\n return nnz\n\n\n @property\n def nobs(self):\n \"\"\"get the number of observations\n\n Returns:\n `int`: the number of observations\n\n \"\"\"\n self.control_data.nobs = self.observation_data.shape[0]\n return self.control_data.nobs\n\n\n @property\n def npar_adj(self):\n \"\"\"get the number of adjustable parameters (not fixed or tied)\n\n Returns:\n `int`: the number of adjustable parameters\n\n \"\"\"\n pass\n np = 0\n for t in self.parameter_data.partrans:\n 
if t not in [\"fixed\", \"tied\"]:\n np += 1\n return np\n\n\n @property\n def npar(self):\n \"\"\"get number of parameters\n\n Returns:\n `int`: the number of parameters\n\n \"\"\"\n self.control_data.npar = self.parameter_data.shape[0]\n return self.control_data.npar\n\n @property\n def forecast_names(self):\n \"\"\"get the forecast names from the pestpp options (if any).\n Returns None if no forecasts are named\n\n Returns:\n [`str`]: a list of forecast names.\n\n \"\"\"\n if \"forecasts\" in self.pestpp_options.keys():\n return self.pestpp_options[\"forecasts\"].lower().split(',')\n elif \"predictions\" in self.pestpp_options.keys():\n return self.pestpp_options[\"predictions\"].lower().split(',')\n else:\n return None\n\n @property\n def obs_groups(self):\n \"\"\"get the observation groups\n\n Returns:\n [`str`]: a list of unique observation groups\n\n \"\"\"\n og = self.observation_data.obgnme.unique().tolist()\n return og\n\n @property\n def nnz_obs_groups(self):\n \"\"\" get the observation groups that contain at least one non-zero weighted\n observation\n\n Returns:\n [`str`]: a list of observation groups that contain at\n least one non-zero weighted observation\n\n \"\"\"\n obs = self.observation_data\n og = obs.loc[obs.weight>0.0,\"obgnme\"].unique().tolist()\n return og\n\n @property\n def adj_par_groups(self):\n \"\"\"get the parameter groups with atleast one adjustable parameter\n\n Returns:\n [`str`]: a list of parameter groups with\n at least one adjustable parameter\n\n \"\"\"\n par = self.parameter_data\n tf = set([\"tied\", \"fixed\"])\n adj_pargp = par.loc[par.partrans.apply(lambda x: x not in tf), \"pargp\"].unique()\n return adj_pargp.tolist()\n\n\n @property\n def par_groups(self):\n \"\"\"get the parameter groups\n\n Returns:\n [`str`]: a list of parameter groups\n\n \"\"\"\n return self.parameter_data.pargp.unique().tolist()\n\n\n @property\n def prior_groups(self):\n \"\"\"get the prior info groups\n\n Returns:\n [`str`]: a list of prior information groups\n\n \"\"\"\n og = self.prior_information.obgnme.unique().tolist()\n return og\n\n @property\n def prior_names(self):\n \"\"\" get the prior information names\n\n Returns:\n [`str`]: a list of prior information names\n\n \"\"\"\n return self.prior_information.pilbl.tolist()\n\n @property\n def par_names(self):\n \"\"\"get the parameter names\n\n Returns:\n [`str`]: a list of parameter names\n \"\"\"\n return self.parameter_data.parnme.tolist()\n\n @property\n def adj_par_names(self):\n \"\"\" get the adjustable (not fixed or tied) parameter names\n\n Returns:\n [`str`]: list of adjustable (not fixed or tied)\n parameter names\n\n \"\"\"\n par = self.parameter_data\n tf = set([\"tied\",\"fixed\"])\n adj_names = par.loc[par.partrans.apply(lambda x: x not in tf),\"parnme\"]\n return adj_names.tolist()\n\n @property\n def obs_names(self):\n \"\"\"get the observation names\n\n Returns:\n [`str`]: a list of observation names\n\n \"\"\"\n return self.observation_data.obsnme.tolist()\n\n @property\n def nnz_obs_names(self):\n \"\"\"get the non-zero weight observation names\n\n Returns:\n [`str`]: a list of non-zero weighted observation names\n\n \"\"\"\n obs = self.observation_data\n nz_names = obs.loc[obs.weight>0.0,\"obsnme\"]\n return nz_names.tolist()\n\n @property\n def zero_weight_obs_names(self):\n \"\"\" get the zero-weighted observation names\n\n Returns:\n [`str`]: a list of zero-weighted observation names\n\n \"\"\"\n obs = self.observation_data\n return obs.loc[obs.weight==0.0,\"obsnme\"].tolist()\n\n\n 
@property\n def estimation(self):\n \"\"\" check if the control_data.pestmode is set to estimation\n\n Returns:\n `bool`: True if `control_data.pestmode` is estmation, False otherwise\n\n \"\"\"\n return self.control_data.pestmode == \"estimation\"\n\n @property\n def tied(self):\n \"\"\" list of tied parameter names\n\n Returns:\n `pandas.DataFrame`: a dataframe of tied parameter information.\n Columns of `tied` are `parnme` and `partied`. Returns `None` if\n no tied parameters are found.\n\n \"\"\"\n par = self.parameter_data\n tied_pars = par.loc[par.partrans==\"tied\",\"parnme\"]\n if tied_pars.shape[0] == 0:\n return None\n if \"partied\" not in par.columns:\n par.loc[:,\"partied\"] = np.NaN\n tied = par.loc[tied_pars,[\"parnme\",\"partied\"]]\n return tied\n\n @staticmethod\n def _read_df(f,nrows,names,converters,defaults=None):\n \"\"\" a private method to read part of an open file into a pandas.DataFrame.\n\n Args:\n f (`file`): open file handle\n nrows (`int`): number of rows to read\n names ([`str`]): names to set the columns of the dataframe with\n converters (`dict`): dictionary of lambda functions to convert strings\n to numerical format\n defaults (`dict`): dictionary of default values to assign columns.\n Default is None\n\n Returns:\n `pandas.DataFrame`: dataframe of control file section info\n\n \"\"\"\n seek_point = f.tell()\n line = f.readline()\n raw = line.strip().split()\n if raw[0].lower() == \"external\":\n filename = raw[1]\n if not os.path.exists(filename):\n raise Exception(\"Pst._read_df() error: external file '{0}' not found\".format(filename))\n df = pd.read_csv(filename,index_col=False,comment='#')\n df.columns = df.columns.str.lower()\n for name in names:\n if name not in df.columns:\n raise Exception(\"Pst._read_df() error: name\" +\\\n \"'{0}' not in external file '{1}' columns\".format(name,filename))\n if name in converters:\n df.loc[:,name] = df.loc[:,name].apply(converters[name])\n if defaults is not None:\n for name in names:\n df.loc[:, name] = df.loc[:, name].fillna(defaults[name])\n else:\n if nrows is None:\n raise Exception(\"Pst._read_df() error: non-external sections require nrows\")\n f.seek(seek_point)\n df = pd.read_csv(f, header=None,names=names,\n nrows=nrows,delim_whitespace=True,\n converters=converters, index_col=False,\n comment='#')\n\n # in case there was some extra junk at the end of the lines\n if df.shape[1] > len(names):\n df = df.iloc[:,len(names)]\n df.columns = names\n\n if defaults is not None:\n for name in names:\n df.loc[:,name] = df.loc[:,name].fillna(defaults[name])\n\n elif np.any(pd.isnull(df).values.flatten()):\n raise Exception(\"NANs found\")\n f.seek(seek_point)\n extras = []\n for _ in range(nrows):\n line = f.readline()\n extra = np.NaN\n if '#' in line:\n raw = line.strip().split('#')\n extra = ' # '.join(raw[1:])\n extras.append(extra)\n\n df.loc[:,\"extra\"] = extras\n\n return df\n\n\n def _read_line_comments(self,f,forgive):\n comments = []\n while True:\n line = f.readline().lower().strip()\n self.lcount += 1\n if line == '':\n if forgive:\n line = None\n break\n else:\n raise Exception(\"unexpected EOF\")\n if line.startswith(\"++\"):\n self._parse_pestpp_line(line)\n elif line.startswith('#'):\n comments.append(line.strip())\n else:\n break\n return line, comments\n\n\n def _read_section_comments(self,f,forgive):\n lines = []\n section_comments = []\n while True:\n line,comments = self._read_line_comments(f,forgive)\n section_comments.extend(comments)\n if line is None or line.startswith(\"*\"):\n break\n 
lines.append(line)\n return line,lines,section_comments\n\n @staticmethod\n def _cast_df_from_lines(name,lines, fieldnames, converters, defaults):\n raw = lines[0].strip().split()\n if raw[0].lower() == \"external\":\n filename = raw[1]\n #check for other items\n if len(raw) > 2:\n for arg in raw[2:]:\n if \"header\" in arg.lower():\n rraw = arg.split('=')\n if len(rraw) != 2:\n raise Exception(\"rraw != 2\")\n if rraw[1].lower() != \"true\":\n raise NotImplementedError(\"non-header external files not support\")\n else:\n PyemuWarning(\"unsupported external file option found: '{0}', ignoring\".format(arg))\n\n\n if not os.path.exists(filename):\n raise Exception(\"Pst._cast_df_from_lines() error: external file '{0}' not found\".format(filename))\n df = pd.read_csv(filename)\n df.columns = df.columns.str.lower()\n\n else:\n extra = []\n raw = []\n for line in lines:\n\n if '#' in line:\n er = line.strip().split('#')\n extra.append('#'.join(er[1:]))\n r = er[0].split()\n else:\n r = line.strip().split()\n extra.append(np.NaN)\n raw.append(r)\n found_fieldnames = fieldnames[:len(raw[0])]\n df = pd.DataFrame(raw,columns=found_fieldnames)\n df.loc[:, \"extra\"] = extra\n for col in fieldnames:\n if col not in df.columns:\n df.loc[:,col] = np.NaN\n if col in fieldnames:\n df.loc[:, col] = df.loc[:, col].fillna(defaults[col])\n if col in converters:\n\n df.loc[:,col] = df.loc[:,col].apply(converters[col])\n\n return df\n\n\n def _cast_prior_df_from_lines(self,lines):\n if lines[0].strip().split()[0].lower() == \"external\":\n filename = lines[0].strip().split()[1]\n if not os.path.exists(filename):\n raise Exception(\"Pst._cast_prior_df_from_lines() error: external file\" +\\\n \"'{0}' not found\".format(filename))\n df = pd.read_csv(filename)\n df.columns = df.columns.str.lower()\n for field in pst_utils.pst_config[\"prior_fieldnames\"]:\n if field not in df.columns:\n raise Exception(\"Pst._cast_prior_df_from_lines() error: external file\" +\\\n \"'{0}' missing required field '{1}'\".format(filename,field))\n self.prior_information = df\n self.prior_information.index = self.prior_information.pilbl\n\n\n else:\n pilbl, obgnme, weight, equation = [], [], [], []\n extra = []\n for line in lines:\n if '#' in line:\n er = line.split('#')\n raw = er[0].split()\n extra.append('#'.join(er[1:]))\n else:\n extra.append(np.NaN)\n raw = line.split()\n pilbl.append(raw[0].lower())\n obgnme.append(raw[-1].lower())\n weight.append(float(raw[-2]))\n eq = ' '.join(raw[1:-2])\n equation.append(eq)\n\n self.prior_information = pd.DataFrame({\"pilbl\": pilbl,\n \"equation\": equation,\n \"weight\": weight,\n \"obgnme\": obgnme})\n self.prior_information.index = self.prior_information.pilbl\n self.prior_information.loc[:,\"extra\"] = extra\n\n\n def _load_version2(self,filename):\n \"\"\"load a version 2 control file\n\n\n\n \"\"\"\n self.lcount = 0\n self.comments = {}\n self.prior_information = self.null_prior\n assert os.path.exists(filename), \"couldn't find control file {0}\".format(filename)\n f = open(filename, 'r')\n last_section = \"\"\n req_sections = {\"* parameter data\":False, \"* observation data\":False,\n \"* model command line\":False,\"* model input\":False, \"* model output\":False}\n while True:\n\n next_section, section_lines, comments = self._read_section_comments(f, True)\n\n\n if \"* control data\" in last_section.lower():\n req_sections[last_section] = True\n iskeyword = False\n if \"keyword\" in last_section.lower():\n iskeyword = True\n self.pestpp_options = 
self.control_data.parse_values_from_lines(section_lines, iskeyword=iskeyword)\n if len(self.pestpp_options) > 0:\n ppo = self.pestpp_options\n svd_opts = [\"svdmode\",\"eigthresh\",\"maxsing\",\"eigwrite\"]\n for svd_opt in svd_opts:\n if svd_opt in ppo:\n self.svd_data.__setattr__(svd_opt, ppo.pop(svd_opt))\n for reg_opt in self.reg_data.should_write:\n if reg_opt in ppo:\n self.reg_data.__setattr__(reg_opt, ppo.pop(reg_opt))\n\n elif \"* parameter groups\" in last_section.lower():\n req_sections[last_section] = True\n self.parameter_groups = self._cast_df_from_lines(next_section, section_lines, self.pargp_fieldnames,\n self.pargp_converters, self.pargp_defaults)\n self.parameter_groups.index = self.parameter_groups.pargpnme\n\n elif \"* parameter data\" in last_section.lower():\n req_sections[last_section] = True\n self.parameter_data = self._cast_df_from_lines(next_section, section_lines, self.par_fieldnames,\n self.par_converters, self.par_defaults)\n self.parameter_data.index = self.parameter_data.parnme\n\n elif \"* observation data\" in last_section.lower():\n req_sections[last_section] = True\n self.observation_data = self._cast_df_from_lines(next_section, section_lines, self.obs_fieldnames,\n self.obs_converters, self.obs_defaults)\n self.observation_data.index = self.observation_data.obsnme\n\n elif \"* model command line\" in last_section.lower():\n req_sections[last_section] = True\n for line in section_lines:\n self.model_command.append(line.strip())\n\n elif \"* model input\" in last_section.lower():\n req_sections[last_section] = True\n if section_lines[0].strip().split()[0].lower() == \"external\":\n filename = section_lines[0].strip().split()[1]\n if not os.path.exists(filename):\n raise Exception(\"Pst.flex_load() external template data file '{0}' not found\".format(filename))\n\n df = pd.read_csv(filename)\n df.columns = df.columns.str.lower()\n if \"pest_file\" not in df.columns:\n raise Exception(\"Pst.flex_load() external template data file must have 'pest_file' in columns\")\n if \"model_file\" not in df.columns:\n raise Exception(\"Pst.flex_load() external template data file must have 'model_file' in columns\")\n for pfile,mfile in zip(df.pest_file,df.model_file):\n self.template_files.append(pfile)\n self.input_files.append(mfile)\n\n else:\n for line in section_lines:\n raw = line.split()\n self.template_files.append(raw[0])\n self.input_files.append(raw[1])\n\n elif \"* model output\" in last_section.lower():\n req_sections[last_section] = True\n if section_lines[0].strip().split()[0].lower() == \"external\":\n filename = section_lines[0].strip().split()[1]\n if not os.path.exists(filename):\n raise Exception(\"Pst.flex_load() external instruction data file '{0}' not found\".format(\n filename))\n df = pd.read_csv(filename)\n df.columns = df.columns.str.lower()\n if \"pest_file\" not in df.columns:\n raise Exception(\"Pst.flex_load() external instruction data file must have 'pest_file' in columns\")\n if \"model_file\" not in df.columns:\n raise Exception(\"Pst.flex_load() external instruction data file must have 'model_file' in columns\")\n for pfile, mfile in zip(df.pest_file, df.model_file):\n self.instruction_files.append(pfile)\n self.output_files.append(mfile)\n else:\n for iline, line in enumerate(section_lines):\n raw = line.split()\n self.instruction_files.append(raw[0])\n self.output_files.append(raw[1])\n\n elif \"* prior information\" in last_section.lower():\n req_sections[last_section] = True\n self._cast_prior_df_from_lines(section_lines)\n\n elif 
len(last_section) > 0:\n print(\"Pst._load_version2() warning: unrecognized section: \", last_section)\n self.comments[last_section] = section_lines\n\n last_section = next_section\n if next_section == None or len(section_lines) == 0:\n break\n\n not_found = []\n for section,found in req_sections.items():\n if not found:\n not_found.append(section)\n if len(not_found) > 0:\n raise Exception(\"Pst._load_version2() error: the following required sections were\"+\\\n \"not found:{0}\".format(\",\".join(not_found)))\n\n\n\n\n def load(self,filename):\n \"\"\" entry point load the pest control file.\n\n Args:\n filename (`str`): pst filename\n\n Note:\n This method is called from the `Pst` construtor unless the `load` arg is `False`.\n\n \"\"\"\n if not os.path.exists(filename):\n raise Exception(\"couldn't find control file {0}\".format(filename))\n f = open(filename, 'r')\n\n while True:\n line = f.readline()\n if line == \"\":\n raise Exception(\"Pst.load() error: EOF when trying to find first line - #sad\")\n if line.strip().split()[0].lower() == \"pcf\":\n break\n if not line.startswith(\"pcf\"):\n raise Exception(\"Pst.load() error: first noncomment line must start with 'pcf', not '{0}'\".format(line))\n raw = line.strip().split()\n\n if len(raw) > 1 and \"version\" in raw[1].lower():\n raw = raw[1].split('=')\n if len(raw) > 1:\n try:\n self._version = int(raw[1])\n except:\n pass\n if self._version == 1:\n self._load_version1(filename)\n elif self._version == 2:\n self._load_version2(filename)\n else:\n raise Exception(\"Pst.load() error: version must be 1 or 2, not '{0}'\".format(version))\n\n\n\n\n\n def _load_version1(self, filename):\n \"\"\"load a version 1 pest control file information\n\n \"\"\"\n\n f = open(filename, 'r')\n f.readline()\n\n #control section\n line = f.readline()\n\n if \"* control data\" not in line:\n raise Exception(\"Pst.load() error: looking for control\" +\\\n \" data section, found:\" + line)\n iskeyword = False\n if \"keyword\" in line.lower():\n iskeyword = True\n control_lines = []\n while True:\n line = f.readline()\n if line == '':\n raise Exception(\"Pst.load() EOF while \" +\\\n \"reading control data section\")\n if line.startswith('*'):\n break\n control_lines.append(line)\n self.control_data.parse_values_from_lines(control_lines,iskeyword)\n\n\n #anything between control data and SVD\n while True:\n if line == '':\n raise Exception(\"EOF before parameter groups section found\")\n if \"* singular value decomposition\" in line.lower() or\\\n \"* parameter groups\" in line.lower():\n break\n self.other_lines.append(line)\n line = f.readline()\n\n if \"* singular value decomposition\" in line.lower():\n svd_lines = []\n for _ in range(3):\n line = f.readline()\n if line == '':\n raise Exception(\"EOF while reading SVD section\")\n svd_lines.append(line)\n self.svd_data.parse_values_from_lines(svd_lines)\n line = f.readline()\n while True:\n if line == '':\n raise Exception(\"EOF before parameter groups section found\")\n if \"* parameter groups\" in line.lower():\n break\n self.other_lines.append(line)\n line = f.readline()\n\n #parameter data\n if \"* parameter groups\" not in line.lower():\n raise Exception(\"Pst.load() error: looking for parameter\" +\\\n \" group section, found:\" + line)\n #try:\n self.parameter_groups = self._read_df(f,self.control_data.npargp,\n self.pargp_fieldnames,\n self.pargp_converters,\n self.pargp_defaults)\n self.parameter_groups.index = self.parameter_groups.pargpnme\n #except Exception as e:\n # raise 
Exception(\"Pst.load() error reading parameter groups: {0}\".format(str(e)))\n\n #parameter data\n line = f.readline()\n if \"* parameter data\" not in line.lower():\n raise Exception(\"Pst.load() error: looking for parameter\" +\\\n \" data section, found:\" + line)\n\n try:\n self.parameter_data = self._read_df(f,self.control_data.npar,\n self.par_fieldnames,\n self.par_converters,\n self.par_defaults)\n self.parameter_data.index = self.parameter_data.parnme\n except Exception as e:\n raise Exception(\"Pst.load() error reading parameter data: {0}\".format(str(e)))\n\n # oh the tied parameter bullshit, how do I hate thee\n counts = self.parameter_data.partrans.value_counts()\n if \"tied\" in counts.index:\n # tied_lines = [f.readline().lower().strip().split() for _ in range(counts[\"tied\"])]\n # self.tied = pd.DataFrame(tied_lines,columns=[\"parnme\",\"partied\"])\n # self.tied.index = self.tied.pop(\"parnme\")\n tied = self._read_df(f,counts[\"tied\"],self.tied_fieldnames,\n self.tied_converters)\n tied.index = tied.parnme\n self.parameter_data.loc[:,\"partied\"] = np.NaN\n self.parameter_data.loc[tied.index,\"partied\"] = tied.partied\n\n # obs groups - just read past for now\n\n line = f.readline()\n # assert \"* observation groups\" in line.lower(),\\\n # \"Pst.load() error: looking for obs\" +\\\n # \" group section, found:\" + line\n # [f.readline() for _ in range(self.control_data.nobsgp)]\n if \"* observation groups\" in line:\n while True:\n seekpoint = f.tell()\n line = f.readline()\n if line == \"\":\n raise Exception(\"Pst.load() error: EOF when searching for '* observation data'\")\n if line.startswith(\"*\"):\n f.seek(seekpoint)\n break\n line = f.readline()\n assert \"* observation data\" in line.lower(), \\\n \"Pst.load() error: looking for observation\" + \\\n \" data section, found:\" + line\n else:\n\n if \"* observation data\" not in line.lower():\n raise Exception(\"Pst.load() error: looking for observation\" +\\\n \" data section, found:\" + line)\n\n try:\n self.observation_data = self._read_df(f,self.control_data.nobs,\n self.obs_fieldnames,\n self.obs_converters)\n self.observation_data.index = self.observation_data.obsnme\n except Exception as e:\n raise Exception(\"Pst.load() error reading observation data: {0}\".format(str(e)))\n #model command line\n line = f.readline()\n assert \"* model command line\" in line.lower(),\\\n \"Pst.load() error: looking for model \" +\\\n \"command section, found:\" + line\n for _ in range(self.control_data.numcom):\n self.model_command.append(f.readline().strip())\n\n #model io\n line = f.readline()\n if \"* model input/output\" not in line.lower():\n raise Exception(\"Pst.load() error; looking for model \" +\\\n \" i/o section, found:\" + line)\n\n for i in range(self.control_data.ntplfle):\n raw = f.readline().strip().split()\n self.template_files.append(raw[0])\n self.input_files.append(raw[1])\n for i in range(self.control_data.ninsfle):\n raw = f.readline().strip().split()\n self.instruction_files.append(raw[0])\n self.output_files.append(raw[1])\n\n #prior information - sort of hackish\n if self.control_data.nprior == 0:\n self.prior_information = self.null_prior\n else:\n pilbl, obgnme, weight, equation = [], [], [], []\n line = f.readline()\n if \"* prior information\" not in line.lower():\n raise Exception(\"Pst.load() error; looking for prior \" +\\\n \" info section, found:\" + line)\n for _ in range(self.control_data.nprior):\n line = f.readline()\n if line == '':\n raise Exception(\"EOF during prior information \" 
+\n \"section\")\n raw = line.strip().split()\n pilbl.append(raw[0].lower())\n obgnme.append(raw[-1].lower())\n weight.append(float(raw[-2]))\n eq = ' '.join(raw[1:-2])\n equation.append(eq)\n self.prior_information = pd.DataFrame({\"pilbl\": pilbl,\n \"equation\": equation,\n \"weight\": weight,\n \"obgnme\": obgnme})\n self.prior_information.index = self.prior_information.pilbl\n if \"regul\" in self.control_data.pestmode:\n line = f.readline()\n if \"* regul\" not in line.lower():\n raise Exception(\"Pst.load() error; looking for regul \" +\\\n \" section, found:\" + line)\n #[self.regul_lines.append(f.readline()) for _ in range(3)]\n regul_lines = [f.readline() for _ in range(3)]\n raw = regul_lines[0].strip().split()\n self.reg_data.phimlim = float(raw[0])\n self.reg_data.phimaccept = float(raw[1])\n raw = regul_lines[1].strip().split()\n self.wfinit = float(raw[0])\n\n\n for line in f:\n if line.strip().startswith(\"++\") and '#' not in line:\n self._parse_pestpp_line(line)\n f.close()\n\n for df in [self.parameter_groups,self.parameter_data,\n self.observation_data,self.prior_information]:\n if \"extra\" in df.columns and df.extra.dropna().shape[0] > 0:\n self.with_comments = False\n break\n return\n\n\n def _parse_pestpp_line(self,line):\n # args = line.replace('++','').strip().split()\n args = line.replace(\"++\", '').strip().split(')')\n args = [a for a in args if a != '']\n # args = ['++'+arg.strip() for arg in args]\n # self.pestpp_lines.extend(args)\n keys = [arg.split('(')[0] for arg in args]\n values = [arg.split('(')[1].replace(')', '') for arg in args if '(' in arg]\n for _ in range(len(values)-1,len(keys)):\n values.append('')\n for key, value in zip(keys, values):\n if key in self.pestpp_options:\n print(\"Pst.load() warning: duplicate pest++ option found:\" + str(key),PyemuWarning)\n self.pestpp_options[key.lower()] = value\n\n def _update_control_section(self):\n \"\"\" private method to synchronize the control section counters with the\n various parts of the control file. This is usually called during the\n Pst.write() method.\n\n \"\"\"\n self.control_data.npar = self.npar\n self.control_data.nobs = self.nobs\n self.control_data.npargp = self.parameter_groups.shape[0]\n self.control_data.nobsgp = self.observation_data.obgnme.\\\n value_counts().shape[0] + self.prior_information.obgnme.\\\n value_counts().shape[0]\n\n self.control_data.nprior = self.prior_information.shape[0]\n self.control_data.ntplfle = len(self.template_files)\n self.control_data.ninsfle = len(self.instruction_files)\n self.control_data.numcom = len(self.model_command)\n\n def rectify_pgroups(self):\n \"\"\" synchronize parameter groups section with the parameter data section\n\n Note:\n This method is called during `Pst.write()` to make sure all parameter\n groups named in `* parameter data` are included. This is so users\n don't have to manually keep this section up. 
This method can also be\n called during control file modifications to see what parameter groups\n are present and prepare for modifying the default values in the `* parameter\n group` section\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n pst.parameter_data.loc[\"par1\",\"pargp\"] = \"new_group\"\n pst.rectify_groups()\n pst.parameter_groups.loc[\"new_group\",\"derinc\"] = 1.0\n\n\n \"\"\"\n # add any parameters groups\n pdata_groups = list(self.parameter_data.loc[:,\"pargp\"].\\\n value_counts().keys())\n #print(pdata_groups)\n need_groups = []\n existing_groups = list(self.parameter_groups.pargpnme)\n for pg in pdata_groups:\n if pg not in existing_groups:\n need_groups.append(pg)\n if len(need_groups) > 0:\n #print(need_groups)\n defaults = copy.copy(pst_utils.pst_config[\"pargp_defaults\"])\n for grp in need_groups:\n defaults[\"pargpnme\"] = grp\n self.parameter_groups = \\\n self.parameter_groups.append(defaults,ignore_index=True)\n\n # now drop any left over groups that aren't needed\n for gp in self.parameter_groups.loc[:,\"pargpnme\"]:\n if gp in pdata_groups and gp not in need_groups:\n need_groups.append(gp)\n self.parameter_groups.index = self.parameter_groups.pargpnme\n self.parameter_groups = self.parameter_groups.loc[need_groups,:]\n idx = self.parameter_groups.index.drop_duplicates()\n if idx.shape[0] != self.parameter_groups.shape[0]:\n warnings.warn(\"dropping duplicate parameter groups\",PyemuWarning)\n self.parameter_groups = self.parameter_groups.loc[~self.parameter_groups.\\\n index.duplicated(keep='first'),:]\n\n def _parse_pi_par_names(self):\n \"\"\" private method to get the parameter names from prior information\n equations. Sets a 'names' column in Pst.prior_information that is a list\n of parameter names\n\n\n \"\"\"\n if self.prior_information.shape[0] == 0:\n return\n if \"names\" in self.prior_information.columns:\n self.prior_information.pop(\"names\")\n if \"rhs\" in self.prior_information.columns:\n self.prior_information.pop(\"rhs\")\n def parse(eqs):\n raw = eqs.split('=')\n #rhs = float(raw[1])\n raw = [i for i in re.split('[###]',\n raw[0].lower().strip().replace(' + ','###').replace(' - ','###')) if i != '']\n # in case of a leading '-' or '+'\n if len(raw[0]) == 0:\n raw = raw[1:]\n # pnames = []\n # for r in raw:\n # if '*' not in r:\n # continue\n # pname = r.split('*')[1].replace(\"log(\", '').replace(')', '').strip()\n # pnames.append(pname)\n # return pnames\n return [r.split('*')[1].replace(\"log(\",'').replace(')','').strip() for r in raw if '*' in r]\n\n self.prior_information.loc[:,\"names\"] =\\\n self.prior_information.equation.apply(lambda x: parse(x))\n\n\n def add_pi_equation(self,par_names,pilbl=None,rhs=0.0,weight=1.0,\n obs_group=\"pi_obgnme\",coef_dict={}):\n \"\"\" a helper to construct a new prior information equation.\n\n Args:\n par_names ([`str`]): parameter names in the equation\n pilbl (`str`): name to assign the prior information equation. If None,\n a generic equation name is formed. Default is None\n rhs (`float`): the right-hand side of the pi equation\n weight (`float`): the weight of the equation\n obs_group (`str`): the observation group for the equation. 
Default is 'pi_obgnme'\n coef_dict (`dict`): a dictionary of parameter name, coefficient pairs to assign\n leading coefficients for one or more parameters in the equation.\n If a parameter is not listed, 1.0 is used for its coefficients.\n Default is {}\n\n \"\"\"\n if pilbl is None:\n pilbl = \"pilbl_{0}\".format(self.__pi_count)\n self.__pi_count += 1\n missing,fixed = [],[]\n\n for par_name in par_names:\n if par_name not in self.parameter_data.parnme:\n missing.append(par_name)\n elif self.parameter_data.loc[par_name,\"partrans\"] in [\"fixed\",\"tied\"]:\n fixed.append(par_name)\n if len(missing) > 0:\n raise Exception(\"Pst.add_pi_equation(): the following pars \"+\\\n \" were not found: {0}\".format(','.join(missing)))\n if len(fixed) > 0:\n raise Exception(\"Pst.add_pi_equation(): the following pars \"+\\\n \" were are fixed/tied: {0}\".format(','.join(missing)))\n eqs_str = ''\n sign = ''\n for i,par_name in enumerate(par_names):\n coef = coef_dict.get(par_name,1.0)\n if coef < 0.0:\n sign = '-'\n coef = np.abs(coef)\n elif i > 0: sign = '+'\n if self.parameter_data.loc[par_name,\"partrans\"] == \"log\":\n par_name = \"log({})\".format(par_name)\n eqs_str += \" {0} {1} * {2} \".format(sign,coef,par_name)\n eqs_str += \" = {0}\".format(rhs)\n self.prior_information.loc[pilbl,\"pilbl\"] = pilbl\n self.prior_information.loc[pilbl,\"equation\"] = eqs_str\n self.prior_information.loc[pilbl,\"weight\"] = weight\n self.prior_information.loc[pilbl,\"obgnme\"] = obs_group\n\n def rectify_pi(self):\n \"\"\" rectify the prior information equation with the current state of the\n parameter_data dataframe.\n\n\n Note:\n Equations that list fixed, tied or missing parameters\n are removed completely even if adjustable parameters are also\n listed in the equation. 
This method is called during Pst.write()\n\n \"\"\"\n if self.prior_information.shape[0] == 0:\n return\n self._parse_pi_par_names()\n adj_names = self.adj_par_names\n def is_good(names):\n for n in names:\n if n not in adj_names:\n return False\n return True\n keep_idx = self.prior_information.names.\\\n apply(lambda x: is_good(x))\n self.prior_information = self.prior_information.loc[keep_idx,:]\n\n def _write_df(self,name,f,df,formatters,columns):\n if name.startswith('*'):\n f.write(name+'\\n')\n if self.with_comments:\n for line in self.comments.get(name, []):\n f.write(line+'\\n')\n if df.loc[:,columns].isnull().values.any():\n #warnings.warn(\"WARNING: NaNs in {0} dataframe\".format(name))\n csv_name = \"pst.{0}.nans.csv\".format(name.replace(\" \",'_').replace('*',''))\n df.to_csv(csv_name)\n raise Exception(\"NaNs in {0} dataframe, csv written to {1}\".format(name, csv_name))\n def ext_fmt(x):\n if pd.notnull(x):\n return \" # {0}\".format(x)\n return ''\n if self.with_comments and 'extra' in df.columns:\n df.loc[:,\"extra_str\"] = df.extra.apply(ext_fmt)\n columns.append(\"extra_str\")\n #formatters[\"extra\"] = lambda x: \" # {0}\".format(x) if pd.notnull(x) else 'test'\n #formatters[\"extra\"] = lambda x: ext_fmt(x)\n\n # only write out the dataframe if it contains data - could be empty\n if len(df) > 0:\n f.write(df.to_string(col_space=0,formatters=formatters,\n columns=columns,\n justify=\"right\",\n header=False,\n index=False) + '\\n')\n\n def sanity_checks(self):\n \"\"\"some basic check for strangeness\n\n Note:\n checks for duplicate names, atleast 1 adjustable parameter\n and at least 1 non-zero-weighted observation\n\n \"\"\"\n\n dups = self.parameter_data.parnme.value_counts()\n dups = dups.loc[dups>1]\n if dups.shape[0] > 0:\n warnings.warn(\"duplicate parameter names: {0}\".format(','.join(list(dups.index))),PyemuWarning)\n dups = self.observation_data.obsnme.value_counts()\n dups = dups.loc[dups>1]\n if dups.shape[0] > 0:\n warnings.warn(\"duplicate observation names: {0}\".format(','.join(list(dups.index))),PyemuWarning)\n\n if self.npar_adj == 0:\n warnings.warn(\"no adjustable pars\",PyemuWarning)\n\n if self.nnz_obs == 0:\n warnings.warn(\"no non-zero weight obs\",PyemuWarning)\n\n #print(\"noptmax: {0}\".format(self.control_data.noptmax))\n\n\n\n def _write_version2(self,new_filename,update_regul=True,external=True):\n self.new_filename = new_filename\n self.rectify_pgroups()\n self.rectify_pi()\n self._update_control_section()\n self.sanity_checks()\n\n f_out = open(new_filename, 'w')\n if self.with_comments:\n for line in self.comments.get(\"initial\", []):\n f_out.write(line + '\\n')\n f_out.write(\"pcf version=2\\n\")\n self.control_data.write_keyword(f_out)\n\n if self.with_comments:\n for line in self.comments.get(\"* singular value decompisition\",[]):\n f_out.write(line)\n self.svd_data.write_keyword(f_out)\n\n if self.control_data.pestmode.lower().startswith(\"r\"):\n self.reg_data.write_keyword(f_out)\n\n for k,v in self.pestpp_options.items():\n f_out.write(\"{0:30} {1:>10}\\n\".format(k,v))\n\n f_out.write(\"* parameter groups\\n\")\n pargp_filename = new_filename.lower().replace(\".pst\",\".pargrp_data.csv\")\n self.parameter_groups.to_csv(pargp_filename,index=False)\n f_out.write(\"external {0}\\n\".format(pargp_filename))\n\n f_out.write(\"* parameter data\\n\")\n par_filename = new_filename.lower().replace(\".pst\", \".par_data.csv\")\n self.parameter_data.to_csv(par_filename,index=False)\n f_out.write(\"external 
{0}\\n\".format(par_filename))\n\n f_out.write(\"* observation data\\n\")\n obs_filename = new_filename.lower().replace(\".pst\", \".obs_data.csv\")\n self.observation_data.to_csv(obs_filename,index=False)\n f_out.write(\"external {0}\\n\".format(obs_filename))\n\n f_out.write(\"* model command line\\n\")\n for mc in self.model_command:\n f_out.write(\"{0}\\n\".format(mc))\n\n f_out.write(\"* model input\\n\")\n io_filename = new_filename.lower().replace(\".pst\",\".tplfile_data.csv\")\n pfiles = self.template_files\n #pfiles.extend(self.instruction_files)\n mfiles = self.input_files\n #mfiles.extend(self.output_files)\n io_df = pd.DataFrame({\"pest_file\":pfiles,\"model_file\":mfiles})\n io_df.to_csv(io_filename,index=False)\n f_out.write(\"external {0}\\n\".format(io_filename))\n\n f_out.write(\"* model output\\n\")\n io_filename = new_filename.lower().replace(\".pst\", \".insfile_data.csv\")\n pfiles = self.instruction_files\n mfiles = self.output_files\n io_df = pd.DataFrame({\"pest_file\": pfiles, \"model_file\": mfiles})\n io_df.to_csv(io_filename,index=False)\n f_out.write(\"external {0}\\n\".format(io_filename))\n\n if self.prior_information.shape[0] > 0:\n f_out.write(\"* prior information\\n\")\n pi_filename = new_filename.lower().replace(\".pst\", \".pi_data.csv\")\n self.prior_information.to_csv(pi_filename,index=False)\n f_out.write(\"external {0}\\n\".format(pi_filename))\n\n\n def write(self,new_filename,update_regul=True,version=None):\n \"\"\"main entry point to write a pest control file.\n\n Args:\n new_filename (`str`): name of the new pest control file\n update_regul (`bool`): flag to update zero-order Tikhonov prior information\n equations to prefer the current parameter values\n version (`int`): flag for which version of control file to write (must be 1 or 2).\n if None, uses Pst._version, which set in the constructor and modified\n during the load\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n pst.parrep(\"my.par\")\n pst.write(my_new.pst\", update_regul=True)\n\n \"\"\"\n if version is None:\n version = self._version\n\n vstring = \"noptmax:{0}, npar_adj:{1}, nnz_obs:{2}\".format(self.control_data.noptmax,\n self.npar_adj,self.nnz_obs)\n print(vstring)\n\n if version == 1:\n return self._write_version1(new_filename=new_filename,update_regul=update_regul)\n elif version == 2:\n return self._write_version2(new_filename=new_filename, update_regul=update_regul)\n else:\n raise Exception(\"Pst.write() error: version must be 1 or 2, not '{0}'\".format(version))\n\n def _write_version1(self,new_filename,update_regul=False):\n \"\"\"write a version 1 pest control file\n\n\n\n \"\"\"\n self.new_filename = new_filename\n self.rectify_pgroups()\n self.rectify_pi()\n self._update_control_section()\n self.sanity_checks()\n\n f_out = open(new_filename, 'w')\n if self.with_comments:\n for line in self.comments.get(\"initial\",[]):\n f_out.write(line+'\\n')\n f_out.write(\"pcf\\n* control data\\n\")\n self.control_data.write(f_out)\n\n # for line in self.other_lines:\n # f_out.write(line)\n if self.with_comments:\n for line in self.comments.get(\"* singular value decompisition\",[]):\n f_out.write(line)\n self.svd_data.write(f_out)\n\n #f_out.write(\"* parameter groups\\n\")\n\n # to catch the byte code ugliness in python 3\n pargpnme = self.parameter_groups.loc[:,\"pargpnme\"].copy()\n self.parameter_groups.loc[:,\"pargpnme\"] = \\\n self.parameter_groups.pargpnme.apply(self.pargp_format[\"pargpnme\"])\n\n self._write_df(\"* parameter groups\", f_out, self.parameter_groups,\n 
self.pargp_format, self.pargp_fieldnames)\n self.parameter_groups.loc[:,\"pargpnme\"] = pargpnme\n\n self._write_df(\"* parameter data\",f_out, self.parameter_data,\n self.par_format, self.par_fieldnames)\n\n if self.tied is not None:\n self._write_df(\"tied parameter data\", f_out, self.tied,\n self.tied_format, self.tied_fieldnames)\n\n f_out.write(\"* observation groups\\n\")\n for group in self.obs_groups:\n try:\n group = group.decode()\n except Exception as e:\n pass\n f_out.write(pst_utils.SFMT(str(group))+'\\n')\n for group in self.prior_groups:\n try:\n group = group.decode()\n except Exception as e:\n pass\n f_out.write(pst_utils.SFMT(str(group))+'\\n')\n self._write_df(\"* observation data\", f_out, self.observation_data,\n self.obs_format, self.obs_fieldnames)\n\n f_out.write(\"* model command line\\n\")\n for cline in self.model_command:\n f_out.write(cline+'\\n')\n\n f_out.write(\"* model input/output\\n\")\n for tplfle,infle in zip(self.template_files,self.input_files):\n f_out.write(tplfle+' '+infle+'\\n')\n for insfle,outfle in zip(self.instruction_files,self.output_files):\n f_out.write(insfle+' '+outfle+'\\n')\n\n if self.nprior > 0:\n if self.prior_information.isnull().values.any():\n #print(\"WARNING: NaNs in prior_information dataframe\")\n warnings.warn(\"NaNs in prior_information dataframe\",PyemuWarning)\n f_out.write(\"* prior information\\n\")\n #self.prior_information.index = self.prior_information.pop(\"pilbl\")\n max_eq_len = self.prior_information.equation.apply(lambda x:len(x)).max()\n eq_fmt_str = \" {0:<\" + str(max_eq_len) + \"s} \"\n eq_fmt_func = lambda x:eq_fmt_str.format(x)\n # 17/9/2016 - had to go with a custom writer loop b/c pandas doesn't want to\n # output strings longer than 100, even with display.max_colwidth\n #f_out.write(self.prior_information.to_string(col_space=0,\n # columns=self.prior_fieldnames,\n # formatters=pi_formatters,\n # justify=\"right\",\n # header=False,\n # index=False) + '\\n')\n #self.prior_information[\"pilbl\"] = self.prior_information.index\n # for idx,row in self.prior_information.iterrows():\n # f_out.write(pst_utils.SFMT(row[\"pilbl\"]))\n # f_out.write(eq_fmt_func(row[\"equation\"]))\n # f_out.write(pst_utils.FFMT(row[\"weight\"]))\n # f_out.write(pst_utils.SFMT(row[\"obgnme\"]) + '\\n')\n for _, row in self.prior_information.iterrows():\n f_out.write(pst_utils.SFMT(row[\"pilbl\"]))\n f_out.write(eq_fmt_func(row[\"equation\"]))\n f_out.write(pst_utils.FFMT(row[\"weight\"]))\n f_out.write(pst_utils.SFMT(row[\"obgnme\"]))\n if self.with_comments and 'extra' in row:\n f_out.write(\" # {0}\".format(row['extra']))\n f_out.write('\\n')\n\n if self.control_data.pestmode.startswith(\"regul\"):\n #f_out.write(\"* regularisation\\n\")\n #if update_regul or len(self.regul_lines) == 0:\n # f_out.write(self.regul_section)\n #else:\n # [f_out.write(line) for line in self.regul_lines]\n self.reg_data.write(f_out)\n\n for line in self.other_lines:\n f_out.write(line+'\\n')\n\n for key,value in self.pestpp_options.items():\n if isinstance(value,list):\n value = ','.join([str(v) for v in value])\n f_out.write(\"++{0}({1})\\n\".format(str(key),str(value)))\n\n if self.with_comments:\n for line in self.comments.get(\"final\",[]):\n f_out.write(line+'\\n')\n\n f_out.close()\n\n\n def get(self, par_names=None, obs_names=None):\n \"\"\"get a new pst object with subset of parameters and/or observations\n\n Args:\n par_names ([`str`]): a list of parameter names to have in the new Pst instance.\n If None, all parameters are in the new Pst 
instance. Default\n is None\n obs_names ([`str`]): a list of observation names to have in the new Pst instance.\n If None, all observations are in teh new Pst instance. Default\n is None\n\n Returns:\n `Pst`: a new Pst instance\n\n Note:\n passing `par_names` as `None` and `obs_names` as `None` effectively\n generates a copy of the current `Pst`\n\n \"\"\"\n\n #if par_names is None and obs_names is None:\n # return copy.deepcopy(self)\n if par_names is None:\n par_names = self.parameter_data.parnme\n if obs_names is None:\n obs_names = self.observation_data.obsnme\n\n new_par = self.parameter_data.copy()\n if par_names is not None:\n new_par.index = new_par.parnme\n new_par = new_par.loc[par_names, :]\n new_obs = self.observation_data.copy()\n new_res = None\n\n if obs_names is not None:\n new_obs.index = new_obs.obsnme\n new_obs = new_obs.loc[obs_names]\n if self.__res is not None:\n new_res = copy.deepcopy(self.res)\n new_res.index = new_res.name\n new_res = new_res.loc[obs_names, :]\n\n self.rectify_pgroups()\n new_pargp = self.parameter_groups.copy()\n new_pargp.index = new_pargp.pargpnme.apply(str.strip)\n new_pargp_names = new_par.pargp.value_counts().index\n new_pargp.reindex(new_pargp_names)\n\n new_pst = Pst(self.filename, resfile=self.resfile, load=False)\n new_pst.parameter_data = new_par\n new_pst.observation_data = new_obs\n new_pst.parameter_groups = new_pargp\n new_pst.__res = new_res\n new_pst.prior_information = self.prior_information\n new_pst.rectify_pi()\n new_pst.control_data = self.control_data.copy()\n\n new_pst.model_command = self.model_command\n new_pst.template_files = self.template_files\n new_pst.input_files = self.input_files\n new_pst.instruction_files = self.instruction_files\n new_pst.output_files = self.output_files\n\n if self.tied is not None:\n warnings.warn(\"Pst.get() not checking for tied parameter \" +\n \"compatibility in new Pst instance\",PyemuWarning)\n #new_pst.tied = self.tied.copy()\n new_pst.other_lines = self.other_lines\n new_pst.pestpp_options = self.pestpp_options\n new_pst.regul_lines = self.regul_lines\n\n return new_pst\n\n\n def parrep(self, parfile=None,enforce_bounds=True):\n \"\"\"replicates the pest parrep util. replaces the parval1 field in the\n parameter data section dataframe with values in a PEST parameter file\n\n Args:\n parfile (`str`, optional): parameter file to use. If None, try to find and use\n a parameter file that corresponds to the case name.\n Default is None\n enforce_bounds (`bool`, optional): flag to enforce parameter bounds after parameter values are updated.\n This is useful because PEST and PEST++ round the parameter values in the\n par file, which may cause slight bound violations. 
Default is `True`\n\n \"\"\"\n if parfile is None:\n parfile = self.filename.replace(\".pst\", \".par\")\n par_df = pst_utils.read_parfile(parfile)\n self.parameter_data.index = self.parameter_data.parnme\n par_df.index = par_df.parnme\n self.parameter_data.parval1 = par_df.parval1\n self.parameter_data.scale = par_df.scale\n self.parameter_data.offset = par_df.offset\n\n if enforce_bounds:\n par = self.parameter_data\n idx = par.loc[par.parval1 > par.parubnd,\"parnme\"]\n par.loc[idx,\"parval1\"] = par.loc[idx,\"parubnd\"]\n idx = par.loc[par.parval1 < par.parlbnd,\"parnme\"]\n par.loc[idx, \"parval1\"] = par.loc[idx, \"parlbnd\"]\n\n\n\n # jwhite - 13 Aug 2019 - gonna remove this because the rec file format is changing a lot\n # and that makes this method dangerous\n # def adjust_weights_recfile(self, recfile=None,original_ceiling=True):\n # \"\"\"adjusts the weights by group of the observations based on the phi components\n # in a pest record file so that total phi is equal to the number of\n # non-zero weighted observations\n #\n # Parameters\n # ----------\n # recfile : str\n # record file name. If None, try to use a record file\n # with the Pst case name. Default is None\n # original_ceiling : bool\n # flag to keep weights from increasing - this is generally a good idea.\n # Default is True\n #\n # \"\"\"\n # if recfile is None:\n # recfile = self.filename.replace(\".pst\", \".rec\")\n # if not os.path.exists(recfile):\n # raise Exception(\"Pst.adjust_weights_recfile(): recfile not found: \" +\\\n # str(recfile))\n # iter_components = pst_utils.get_phi_comps_from_recfile(recfile)\n # iters = iter_components.keys()\n # iters.sort()\n # obs = self.observation_data\n # ogroups = obs.groupby(\"obgnme\").groups\n # last_complete_iter = None\n # for ogroup in ogroups.keys():\n # for iiter in iters[::-1]:\n # incomplete = False\n # if ogroup not in iter_components[iiter]:\n # incomplete = True\n # break\n # if not incomplete:\n # last_complete_iter = iiter\n # break\n # if last_complete_iter is None:\n # raise Exception(\"Pst.pwtadj2(): no complete phi component\" +\n # \" records found in recfile\")\n # self._adjust_weights_by_phi_components(\n # iter_components[last_complete_iter],original_ceiling)\n\n\n # jwhite - 13 Aug 2019 - removing this one because it has been replaced\n # by adjust_weights_discrepancy (and there are too many adjust_weights methods)\n # def adjust_weights_resfile(self, resfile=None,original_ceiling=True):\n # \"\"\"adjusts the weights by group of the observations based on the phi components\n # in a pest residual file so that total phi is equal to the number of\n # non-zero weighted observations (e.g. Mozorov's discrepancy principal)\n #\n # Args:\n # resfile (`str`): residual file name. If None, try to use a residual file\n # with the Pst case name. Default is None\n # original_ceiling (`bool`): flag to keep weights from increasing - this is\n # generally a good idea. 
Default is True\n #\n # Example::\n #\n # pst = pyemu.Pst(\"my.pst\")\n # print(pst.phi) #assumes \"my.res\" is colocated with \"my.pst\"\n # pst.adjust_weights_resfile()\n # print(pst.phi) # phi should equal number of non-zero observations\n #\n #\n # \"\"\"\n # if resfile is not None:\n # self.resfile = resfile\n # self.__res = None\n # phi_comps = self.phi_components\n # self._adjust_weights_by_phi_components(phi_comps,original_ceiling)\n\n def adjust_weights_discrepancy(self, resfile=None,original_ceiling=True, bygroups=False):\n \"\"\"adjusts the weights of each non-zero weight observation based\n on the residual in the pest residual file so each observations contribution\n to phi is 1.0 (e.g. Mozorov's discrepancy principal)\n\n Args:\n resfile (`str`): residual file name. If None, try to use a residual file\n with the Pst case name. Default is None\n original_ceiling (`bool`): flag to keep weights from increasing - this is\n generally a good idea. Default is True\n bygroups (`bool`): flag to adjust weights by groups. If False, the weight\n of each non-zero weighted observation is adjusted individually. If True,\n intergroup weighting is preserved (the contribution to each group is used)\n but this may result in some strangeness if some observations in a group have\n a really low phi already.\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n print(pst.phi) #assumes \"my.res\" is colocated with \"my.pst\"\n pst.adjust_weights_discrepancy()\n print(pst.phi) # phi should equal number of non-zero observations\n\n \"\"\"\n if resfile is not None:\n self.resfile = resfile\n self.__res = None\n if bygroups:\n phi_comps = self.phi_components\n self._adjust_weights_by_phi_components(phi_comps,original_ceiling)\n else:\n obs = self.observation_data.loc[self.nnz_obs_names,:]\n swr = (self.res.loc[self.nnz_obs_names,:].residual * obs.weight)**2\n factors = (1.0/swr).apply(np.sqrt)\n if original_ceiling:\n factors = factors.apply(lambda x: 1.0 if x > 1.0 else x)\n self.observation_data.loc[self.nnz_obs_names,\"weight\"] *= factors\n\n\n\n def _adjust_weights_by_phi_components(self, components,original_ceiling):\n \"\"\"private method that resets the weights of observations by group to account for\n residual phi components.\n\n Args:\n components (`dict`): a dictionary of obs group:phi contribution pairs\n original_ceiling (`bool`): flag to keep weights from increasing.\n\n \"\"\"\n obs = self.observation_data\n nz_groups = obs.groupby(obs[\"weight\"].map(lambda x: x == 0)).groups\n ogroups = obs.groupby(\"obgnme\").groups\n for ogroup, idxs in ogroups.items():\n if self.control_data.pestmode.startswith(\"regul\") \\\n and \"regul\" in ogroup.lower():\n continue\n og_phi = components[ogroup]\n nz_groups = obs.loc[idxs,:].groupby(obs.loc[idxs,\"weight\"].\\\n map(lambda x: x == 0)).groups\n og_nzobs = 0\n if False in nz_groups.keys():\n og_nzobs = len(nz_groups[False])\n if og_nzobs == 0 and og_phi > 0:\n raise Exception(\"Pst.adjust_weights_by_phi_components():\"\n \" no obs with nonzero weight,\" +\n \" but phi > 0 for group:\" + str(ogroup))\n if og_phi > 0:\n factor = np.sqrt(float(og_nzobs) / float(og_phi))\n if original_ceiling:\n factor = min(factor,1.0)\n obs.loc[idxs,\"weight\"] = obs.weight[idxs] * factor\n self.observation_data = obs\n\n def __reset_weights(self, target_phis, res_idxs, obs_idxs):\n \"\"\"private method to reset weights based on target phi values\n for each group. 
This method should not be called directly\n\n Args:\n target_phis (`dict`): target phi contribution for groups to reweight\n res_idxs (`dict`): the index positions of each group of interest\n in the res dataframe\n obs_idxs (`dict`): the index positions of each group of interest\n in the observation data dataframe\n\n \"\"\"\n\n obs = self.observation_data\n res = self.res\n for item in target_phis.keys():\n if item not in res_idxs.keys():\n raise Exception(\"Pst.__reset_weights(): \" + str(item) +\\\n \" not in residual group indices\")\n if item not in obs_idxs.keys():\n raise Exception(\"Pst.__reset_weights(): \" + str(item) +\\\n \" not in observation group indices\")\n #actual_phi = ((self.res.loc[res_idxs[item], \"residual\"] *\n # self.observation_data.loc\n # [obs_idxs[item], \"weight\"])**2).sum()\n actual_phi = (((obs.loc[obs_idxs[item],\"obsval\"] - res.loc[res_idxs[item], \"modelled\"]) *\n self.observation_data.loc[obs_idxs[item], \"weight\"])**2).sum()\n if actual_phi > 0.0:\n weight_mult = np.sqrt(target_phis[item] / actual_phi)\n self.observation_data.loc[obs_idxs[item], \"weight\"] *= weight_mult\n else:\n (\"Pst.__reset_weights() warning: phi group {0} has zero phi, skipping...\".format(item))\n\n\n def _adjust_weights_by_list(self, obslist, weight):\n \"\"\"a private method to reset the weight for a list of observation names. Supports the\n data worth analyses in pyemu.Schur class. This method only adjusts\n observation weights in the current weight is nonzero. User beware!\n\n Args:\n obslist ([`str`]): list of observation names\n weight (`float`): new weight to assign\n \"\"\"\n\n obs = self.observation_data\n if not isinstance(obslist, list):\n obslist = [obslist]\n obslist = set([str(i).lower() for i in obslist])\n # groups = obs.groupby([lambda x:x in obslist,\n # obs.weight.apply(lambda x:x==0.0)]).groups\n # if (True,True) in groups:\n # obs.loc[groups[True,True],\"weight\"] = weight\n reset_names = obs.loc[obs.apply(lambda x: x.obsnme in obslist and x.weight == 0, axis=1), \"obsnme\"]\n if len(reset_names) > 0:\n obs.loc[reset_names, \"weight\"] = weight\n\n def adjust_weights(self,obs_dict=None, obsgrp_dict=None):\n \"\"\"reset the weights of observations or observation groups to contribute a specified\n amount to the composite objective function\n\n Args:\n obs_dict (`dict`, optional): dictionary of observation name,new contribution pairs\n obsgrp_dict (`dict`, optional): dictionary of obs group name,contribution pairs\n\n Note:\n if all observations in a named obs group have zero weight, they will all be\n assigned a non-zero weight so that the request phi contribution\n can be met. Similarly, any observations listed in obs_dict with zero\n weight will also be reset. 
User beware!\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n\n # adjust a single observation\n pst.adjust_weights(obs_dict={\"obs1\":10})\n\n # adjust a single observation group\n pst.adjust_weights(obsgrp_dict={\"group1\":100.0})\n\n # make all non-zero weighted groups have a contribution of 100.0\n balanced_groups = {grp:100 for grp in pst.nnz_obs_groups}\n pst.adjust_weights(obsgrp_dict=balanced_groups)\n\n \"\"\"\n\n self.observation_data.index = self.observation_data.obsnme\n self.res.index = self.res.name\n\n if obsgrp_dict is not None:\n # reset groups with all zero weights\n obs = self.observation_data\n for grp in obsgrp_dict.keys():\n if obs.loc[obs.obgnme==grp,\"weight\"].sum() == 0.0:\n obs.loc[obs.obgnme==grp,\"weight\"] = 1.0\n res_groups = self.res.groupby(\"group\").groups\n obs_groups = self.observation_data.groupby(\"obgnme\").groups\n self.__reset_weights(obsgrp_dict, res_groups, obs_groups)\n if obs_dict is not None:\n # reset obs with zero weight\n obs = self.observation_data\n for oname in obs_dict.keys():\n if obs.loc[oname,\"weight\"] == 0.0:\n obs.loc[oname,\"weight\"] = 1.0\n\n #res_groups = self.res.groupby(\"name\").groups\n res_groups = self.res.groupby(self.res.index).groups\n #obs_groups = self.observation_data.groupby(\"obsnme\").groups\n obs_groups = self.observation_data.groupby(self.observation_data.index).groups\n self.__reset_weights(obs_dict, res_groups, obs_groups)\n\n\n def proportional_weights(self, fraction_stdev=1.0, wmax=100.0,\n leave_zero=True):\n \"\"\"setup weights inversely proportional to the observation value\n\n Args:\n fraction_stdev (`float`, optional): the fraction portion of the observation\n val to treat as the standard deviation. set to 1.0 for\n inversely proportional. Default is 1.0\n wmax (`float`, optional): maximum weight to allow. 
Default is 100.0\n\n leave_zero (`bool`, optional): flag to leave existing zero weights.\n Default is True\n\n \"\"\"\n new_weights = []\n for oval, ow in zip(self.observation_data.obsval,\n self.observation_data.weight):\n if leave_zero and ow == 0.0:\n ow = 0.0\n elif oval == 0.0:\n ow = wmax\n else:\n nw = 1.0 / (np.abs(oval) * fraction_stdev)\n ow = min(wmax, nw)\n new_weights.append(ow)\n self.observation_data.weight = new_weights\n\n def calculate_pertubations(self):\n \"\"\" experimental method to calculate finite difference parameter\n pertubations.\n\n Note:\n\n The pertubation values are added to the\n `Pst.parameter_data` attribute - user beware!\n\n \"\"\"\n self.build_increments()\n self.parameter_data.loc[:,\"pertubation\"] = \\\n self.parameter_data.parval1 + \\\n self.parameter_data.increment\n\n self.parameter_data.loc[:,\"out_forward\"] = \\\n self.parameter_data.loc[:,\"pertubation\"] > \\\n self.parameter_data.loc[:,\"parubnd\"]\n\n out_forward = self.parameter_data.groupby(\"out_forward\").groups\n if True in out_forward:\n self.parameter_data.loc[out_forward[True],\"pertubation\"] = \\\n self.parameter_data.loc[out_forward[True],\"parval1\"] - \\\n self.parameter_data.loc[out_forward[True],\"increment\"]\n\n self.parameter_data.loc[:,\"out_back\"] = \\\n self.parameter_data.loc[:,\"pertubation\"] < \\\n self.parameter_data.loc[:,\"parlbnd\"]\n out_back = self.parameter_data.groupby(\"out_back\").groups\n if True in out_back:\n still_out = out_back[True]\n print(self.parameter_data.loc[still_out,:],flush=True)\n\n raise Exception(\"Pst.calculate_pertubations(): \" +\\\n \"can't calc pertubations for the following \"+\\\n \"Parameters {0}\".format(','.join(still_out)))\n\n def build_increments(self):\n \"\"\" experimental method to calculate parameter increments for use\n in the finite difference pertubation calculations\n\n Note:\n user beware!\n\n \"\"\"\n self.enforce_bounds()\n self.add_transform_columns()\n par_groups = self.parameter_data.groupby(\"pargp\").groups\n inctype = self.parameter_groups.groupby(\"inctyp\").groups\n for itype,inc_groups in inctype.items():\n pnames = []\n for group in inc_groups:\n pnames.extend(par_groups[group])\n derinc = self.parameter_groups.loc[group,\"derinc\"]\n self.parameter_data.loc[par_groups[group],\"derinc\"] = derinc\n if itype == \"absolute\":\n self.parameter_data.loc[pnames,\"increment\"] = \\\n self.parameter_data.loc[pnames,\"derinc\"]\n elif itype == \"relative\":\n self.parameter_data.loc[pnames,\"increment\"] = \\\n self.parameter_data.loc[pnames,\"derinc\"] * \\\n self.parameter_data.loc[pnames,\"parval1\"]\n elif itype == \"rel_to_max\":\n mx = self.parameter_data.loc[pnames,\"parval1\"].max()\n self.parameter_data.loc[pnames,\"increment\"] = \\\n self.parameter_data.loc[pnames,\"derinc\"] * mx\n else:\n raise Exception('Pst.get_derivative_increments(): '+\\\n 'unrecognized increment type:{0}'.format(itype))\n\n #account for fixed pars\n isfixed = self.parameter_data.partrans==\"fixed\"\n self.parameter_data.loc[isfixed,\"increment\"] = \\\n self.parameter_data.loc[isfixed,\"parval1\"]\n\n def add_transform_columns(self):\n \"\"\" add transformed values to the `Pst.parameter_data` attribute\n\n Note:\n adds `parval1_trans`, `parlbnd_trans` and `parubnd_trans` to\n `Pst.parameter_data`\n\n\n \"\"\"\n for col in [\"parval1\",\"parlbnd\",\"parubnd\",\"increment\"]:\n if col not in self.parameter_data.columns:\n continue\n self.parameter_data.loc[:,col+\"_trans\"] = (self.parameter_data.loc[:,col] *\n 
self.parameter_data.scale) +\\\n self.parameter_data.offset\n #isnotfixed = self.parameter_data.partrans != \"fixed\"\n islog = self.parameter_data.partrans == \"log\"\n self.parameter_data.loc[islog,col+\"_trans\"] = \\\n self.parameter_data.loc[islog,col+\"_trans\"].\\\n apply(lambda x:np.log10(x))\n\n def enforce_bounds(self):\n \"\"\" enforce bounds violation\n\n Note:\n cheap enforcement of simply bringing violators back in bounds\n\n \"\"\"\n too_big = self.parameter_data.loc[:,\"parval1\"] > \\\n self.parameter_data.loc[:,\"parubnd\"]\n self.parameter_data.loc[too_big,\"parval1\"] = \\\n self.parameter_data.loc[too_big,\"parubnd\"]\n\n too_small = self.parameter_data.loc[:,\"parval1\"] < \\\n self.parameter_data.loc[:,\"parlbnd\"]\n self.parameter_data.loc[too_small,\"parval1\"] = \\\n self.parameter_data.loc[too_small,\"parlbnd\"]\n\n\n @classmethod\n def from_io_files(cls, tpl_files, in_files, ins_files, out_files,\n pst_filename=None, pst_path=None):\n \"\"\" create a Pst instance from model interface files.\n\n Args:\n tpl_files ([`str`]): list of template file names\n in_files ([`str`]): list of model input file names (pairs with template files)\n ins_files ([`str`]): list of instruction file names\n out_files ([`str`]): list of model output file names (pairs with instruction files)\n pst_filename (`str`): name of control file to write. If None, no file is written.\n Default is None\n pst_path ('str'): the path from the control file to the IO files. For example, if the\n control will be in the same directory as the IO files, then `pst_path` should be '.'.\n Default is '.'\n\n\n Returns:\n `Pst`: new control file instance with parameter and observation names\n found in `tpl_files` and `ins_files`, repsectively.\n\n Note:\n calls `pyemu.helpers.pst_from_io_files()`\n\n Assigns generic values for parameter info. Tries to use INSCHEK\n to set somewhat meaningful observation values\n\n all file paths are relatively to where python is running.\n\n TODO:\n add pst_path option\n make in_files and out_files optional\n\n Example::\n\n tpl_files = [\"my.tpl\"]\n in_files = [\"my.in\"]\n ins_files = [\"my.ins\"]\n out_files = [\"my.out\"]\n pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)\n pst.control_data.noptmax = 0\n pst.write(\"my.pst)\n\n\n\n \"\"\"\n from pyemu import helpers\n return helpers.pst_from_io_files(tpl_files=tpl_files,in_files=in_files,\n ins_files=ins_files,out_files=out_files,\n pst_filename=pst_filename, pst_path=pst_path)\n\n\n def add_parameters(self,template_file,in_file=None,pst_path=None):\n \"\"\" add new parameters to an existing control file\n\n Args:\n template_file (`str`): template file with (possibly) some new parameters\n in_file (`str`): model input file. If None, template_file.replace('.tpl','') is used.\n Default is None.\n pst_path (`str`): the path to append to the template_file and in_file in the control file. If\n not None, then any existing path in front of the template or in file is split off\n and pst_path is prepended. If python is being run in a directory other than where the control\n file will reside, it is useful to pass `pst_path` as `.`. 
Default is None\n\n Returns:\n `pandas.DataFrame`: the data for the new parameters that were added.\n If no new parameters are in the new template file, returns None\n\n Note:\n populates the new parameter information with default values\n\n Example::\n\n pst = pyemu.Pst(os.path.join(\"template\",\"my.pst\"))\n pst.add_parameters(os.path.join(\"template\",\"new_pars.dat.tpl\",pst_path=\".\")\n pst.write(os.path.join(\"template\",\"my_new.pst\")\n\n \"\"\"\n if not os.path.exists(template_file):\n raise Exception(\"template file '{0}' not found\".format(template_file))\n if template_file == in_file:\n raise Exception(\"template_file == in_file\")\n # get the parameter names in the template file\n parnme = pst_utils.parse_tpl_file(template_file)\n\n # find \"new\" parameters that are not already in the control file\n new_parnme = [p for p in parnme if p not in self.parameter_data.parnme]\n\n if len(new_parnme) == 0:\n warnings.warn(\"no new parameters found in template file {0}\".format(template_file),PyemuWarning)\n new_par_data = None\n else:\n # extend pa\n # rameter_data\n new_par_data = pst_utils._populate_dataframe(new_parnme, pst_utils.pst_config[\"par_fieldnames\"],\n pst_utils.pst_config[\"par_defaults\"],\n pst_utils.pst_config[\"par_dtype\"])\n new_par_data.loc[new_parnme,\"parnme\"] = new_parnme\n self.parameter_data = self.parameter_data.append(new_par_data)\n if in_file is None:\n in_file = template_file.replace(\".tpl\",'')\n if pst_path is not None:\n template_file = os.path.join(pst_path,os.path.split(template_file)[-1])\n in_file = os.path.join(pst_path, os.path.split(in_file)[-1])\n self.template_files.append(template_file)\n self.input_files.append(in_file)\n\n return new_par_data\n\n\n def add_observations(self,ins_file,out_file=None,pst_path=None,inschek=True):\n \"\"\" add new observations to a control file\n\n Args:\n ins_file (`str`): instruction file with exclusively new observation names\n out_file (`str`): model output file. If None, then ins_file.replace(\".ins\",\"\") is used.\n Default is None\n pst_path (`str`): the path to append to the instruction file and out file in the control file. If\n not None, then any existing path in front of the template or in file is split off\n and pst_path is prepended. If python is being run in a directory other than where the control\n file will reside, it is useful to pass `pst_path` as `.`. Default is None\n inschek (`bool`): flag to try to process the existing output file using the `pyemu.InstructionFile`\n class. 
If successful, processed outputs are used as obsvals\n\n Returns:\n `pandas.DataFrame`: the data for the new observations that were added\n\n Note:\n populates the new observation information with default values\n\n Example::\n\n pst = pyemu.Pst(os.path.join(\"template\",\"my.pst\"))\n pst.add_observations(os.path.join(\"template\",\"new_obs.dat.tpl\",pst_path=\".\")\n pst.write(os.path.join(\"template\",\"my_new.pst\")\n\n \"\"\"\n if not os.path.exists(ins_file):\n raise Exception(\"ins file not found: {0}, {1}\".format(os.getcwd(),ins_file))\n if out_file is None:\n out_file = ins_file.replace(\".ins\",\"\")\n if ins_file == out_file:\n raise Exception(\"ins_file == out_file, doh!\")\n\n # get the parameter names in the template file\n obsnme = pst_utils.parse_ins_file(ins_file)\n\n sobsnme = set(obsnme)\n sexist = set(self.obs_names)\n sint = sobsnme.intersection(sexist)\n if len(sint) > 0:\n raise Exception(\"the following obs instruction file {0} are already in the control file:{1}\".\n format(ins_file,','.join(sint)))\n\n # find \"new\" parameters that are not already in the control file\n new_obsnme = [o for o in obsnme if o not in self.observation_data.obsnme]\n\n if len(new_obsnme) == 0:\n raise Exception(\"no new observations found in instruction file {0}\".format(ins_file))\n\n # extend observation_data\n new_obs_data = pst_utils._populate_dataframe(new_obsnme, pst_utils.pst_config[\"obs_fieldnames\"],\n pst_utils.pst_config[\"obs_defaults\"],\n pst_utils.pst_config[\"obs_dtype\"])\n new_obs_data.loc[new_obsnme,\"obsnme\"] = new_obsnme\n new_obs_data.index = new_obsnme\n self.observation_data = self.observation_data.append(new_obs_data)\n cwd = '.'\n if pst_path is not None:\n cwd = os.path.join(*os.path.split(ins_file)[:-1])\n ins_file = os.path.join(pst_path,os.path.split(ins_file)[-1])\n out_file = os.path.join(pst_path, os.path.split(out_file)[-1])\n self.instruction_files.append(ins_file)\n self.output_files.append(out_file)\n df = None\n if inschek:\n #df = pst_utils._try_run_inschek(ins_file,out_file,cwd=cwd)\n ins_file = os.path.join(cwd,ins_file)\n out_file = os.path.join(cwd,out_file)\n df = pst_utils.try_process_output_file(ins_file=ins_file,output_file=out_file)\n if df is not None:\n #print(self.observation_data.index,df.index)\n self.observation_data.loc[df.index,\"obsval\"] = df.obsval\n new_obs_data.loc[df.index,\"obsval\"] = df.obsval\n return new_obs_data\n\n def write_input_files(self,pst_path='.'):\n \"\"\"writes model input files using template files and current `parval1` values.\n\n Args:\n pst_path (`str`): the path to where control file and template files reside.\n Default is '.'\n\n Note:\n adds \"parval1_trans\" column to Pst.parameter_data that includes the\n effect of scale and offset\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n\n # load final parameter values\n pst.parrep(\"my.par\")\n\n # write new model input files with final parameter values\n pst.write_input_files()\n\n \"\"\"\n pst_utils.write_input_files(self,pst_path=pst_path)\n\n\n def process_output_files(self,pst_path='.'):\n \"\"\"processing the model output files using the instruction files\n and existing model output files.\n\n Args:\n pst_path (`str`): relative path from where python is running to\n where the control file, instruction files and model output files\n are located. 
Default is \".\" (current python directory)\n\n Returns:\n `pandas.Series`: model output values\n\n Note:\n requires a complete set of model input files at relative path\n from where python is running to `pst_path`\n\n \"\"\"\n return pst_utils.process_output_files(self,pst_path)\n\n\n def get_res_stats(self,nonzero=True):\n \"\"\" get some common residual stats by observation group.\n\n Args:\n nonzero (`bool`): calculate stats using only nonzero-weighted observations. This may seem\n obsvious to most users, but you never know....\n\n Returns:\n `pd.DataFrame`: a dataframe with columns for groups names and indices of statistic name.\n\n Note:\n Stats are derived from the current obsvals, weights and grouping in\n `Pst.observation_data` and the `modelled` values in `Pst.res`. The\n key here is 'current' because if obsval, weights and/or groupings have\n changed in `Pst.observation_data` since the residuals file was generated\n then the current values for `obsval`, `weight` and `group` are used\n\n the normalized RMSE is normalized against the obsval range (max - min)\n\n \"\"\"\n res = self.res.copy()\n res.loc[:,\"obsnme\"] = res.pop(\"name\")\n res.index = res.obsnme\n if nonzero:\n obs = self.observation_data.loc[self.nnz_obs_names,:]\n #print(obs.shape,res.shape)\n res = res.loc[obs.obsnme,:]\n #print(obs.shape, res.shape)\n\n #reset the res parts to current obs values and remove\n #duplicate attributes\n res.loc[:,\"weight\"] = obs.weight\n res.loc[:,\"obsval\"] = obs.obsval\n res.loc[:,\"obgnme\"] = obs.obgnme\n res.pop(\"group\")\n res.pop(\"measured\")\n\n #build these attribute lists for faster lookup later\n og_dict = {og:res.loc[res.obgnme==og,\"obsnme\"] for og in res.obgnme.unique()}\n og_names = list(og_dict.keys())\n\n # the list of functions and names\n sfuncs = [self._stats_rss, self._stats_mean,self._stats_mae,\n self._stats_rmse,self._stats_nrmse]\n snames = [\"rss\",\"mean\",\"mae\",\"rmse\",\"nrmse\"]\n\n data = []\n for sfunc in sfuncs:\n full = sfunc(res)\n groups = [full]\n for og in og_names:\n onames = og_dict[og]\n res_og = res.loc[onames,:]\n groups.append(sfunc(res_og))\n data.append(groups)\n\n og_names.insert(0,\"all\")\n stats = pd.DataFrame(data,columns=og_names,index=snames)\n return stats\n\n @staticmethod\n def _stats_rss(df):\n return (((df.modelled - df.obsval) * df.weight)**2).sum()\n\n @staticmethod\n def _stats_mean(df):\n return (df.modelled - df.obsval).mean()\n\n @staticmethod\n def _stats_mae(df):\n return ((df.modelled - df.obsval).apply(np.abs)).sum() / df.shape[0]\n\n @staticmethod\n def _stats_rmse(df):\n return np.sqrt(((df.modelled - df.obsval)**2).sum() / df.shape[0])\n\n @staticmethod\n def _stats_nrmse(df):\n return Pst._stats_rmse(df) / (df.obsval.max() - df.obsval.min())\n\n\n def plot(self,kind=None,**kwargs):\n \"\"\"method to plot various parts of the control. 
This is sweet as!\n\n Args:\n kind (`str`): options are 'prior' (prior parameter histograms, '1to1' (line of equality\n and sim vs res), 'obs_v_sim' (time series using datetime suffix), 'phi_pie'\n (pie chart of phi components)\n kwargs (`dict`): optional args for plots that are passed to pyemu plot helpers and ultimately\n to matplotlib\n\n Note:\n Depending on 'kind' argument, a multipage pdf is written\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n pst.plot(kind=\"1to1\") # requires Pst.res\n pst.plot(kind=\"prior\")\n pst.plot(kind=\"phi_pie\")\n\n\n \"\"\"\n return plot_utils.pst_helper(self,kind,**kwargs)\n\n\n\n\n def write_par_summary_table(self,filename=None,group_names=None,\n sigma_range = 4.0):\n \"\"\"write a stand alone parameter summary latex table\n\n\n Args:\n filename (`str`): latex filename. If None, use <case>.par.tex. If `filename` is \"none\", no table\n is writtenDefault is None\n group_names (`dict`): par group names : table names. For example {\"w0\":\"well stress period 1\"}.\n Default is None\n sigma_range (`float`): number of standard deviations represented by parameter bounds. Default\n is 4.0, implying 95% confidence bounds\n\n Returns:\n `pandas.DataFrame`: the summary parameter group dataframe\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n pst.write_par_summary_table(filename=\"par.tex\")\n\n \"\"\"\n\n ffmt = lambda x: \"{0:5G}\".format(x)\n par = self.parameter_data.copy()\n pargp = par.groupby(par.pargp).groups\n #cols = [\"parval1\",\"parubnd\",\"parlbnd\",\"stdev\",\"partrans\",\"pargp\"]\n cols = [\"pargp\",\"partrans\",\"count\",\"parval1\",\"parubnd\",\"parlbnd\",\"stdev\"]\n\n labels = {\"parval1\":\"initial value\",\"parubnd\":\"upper bound\",\n \"parlbnd\":\"lower bound\",\"partrans\":\"transform\",\n \"stdev\":\"standard deviation\",\"pargp\":\"type\",\"count\":\"count\"}\n\n li = par.partrans == \"log\"\n par.loc[li,\"parval1\"] = par.parval1.loc[li].apply(np.log10)\n par.loc[li, \"parubnd\"] = par.parubnd.loc[li].apply(np.log10)\n par.loc[li, \"parlbnd\"] = par.parlbnd.loc[li].apply(np.log10)\n par.loc[:,\"stdev\"] = (par.parubnd - par.parlbnd) / sigma_range\n\n data = {c:[] for c in cols}\n for pg,pnames in pargp.items():\n par_pg = par.loc[pnames,:]\n data[\"pargp\"].append(pg)\n for col in cols:\n if col in [\"pargp\",\"partrans\"]:\n continue\n if col == \"count\":\n data[\"count\"].append(par_pg.shape[0])\n continue\n #print(col)\n mn = par_pg.loc[:,col].min()\n mx = par_pg.loc[:,col].max()\n if mn == mx:\n data[col].append(ffmt(mn))\n else:\n data[col].append(\"{0} to {1}\".format(ffmt(mn),ffmt(mx)))\n\n pts = par_pg.partrans.unique()\n if len(pts) == 1:\n data[\"partrans\"].append(pts[0])\n else:\n data[\"partrans\"].append(\"mixed\")\n\n pargp_df = pd.DataFrame(data=data,index=list(pargp.keys()))\n pargp_df = pargp_df.loc[:, cols]\n if group_names is not None:\n pargp_df.loc[:, \"pargp\"] = pargp_df.pargp.apply(lambda x: group_names.pop(x, x))\n pargp_df.columns = pargp_df.columns.map(lambda x: labels[x])\n\n preamble = '\\\\documentclass{article}\\n\\\\usepackage{booktabs}\\n'+ \\\n '\\\\usepackage{pdflscape}\\n\\\\usepackage{longtable}\\n' + \\\n '\\\\usepackage{booktabs}\\n\\\\usepackage{nopageno}\\n\\\\begin{document}\\n'\n\n if filename == \"none\":\n return pargp_df\n if filename is None:\n filename = self.filename.replace(\".pst\",\".par.tex\")\n\n with open(filename,'w') as f:\n f.write(preamble)\n f.write(\"\\\\begin{center}\\nParameter Summary\\n\\\\end{center}\\n\")\n 
f.write(\"\\\\begin{center}\\n\\\\begin{landscape}\\n\")\n pargp_df.to_latex(f, index=False, longtable=True)\n f.write(\"\\\\end{landscape}\\n\")\n f.write(\"\\\\end{center}\\n\")\n f.write(\"\\\\end{document}\\n\")\n return pargp_df\n\n def write_obs_summary_table(self,filename=None,group_names=None):\n \"\"\"write a stand alone observation summary latex table\n\n\n Args:\n filename (`str`): latex filename. If `filename` is \"none\", no table is written.\n If None, use <case>.par.tex. Default is None\n group_names (`dict`): obs group names : table names. For example {\"hds\":\"simulated groundwater level\"}.\n Default is None\n\n Returns:\n `pandas.DataFrame`: the summary observation group dataframe\n\n Example::\n\n pst = pyemu.Pst(\"my.pst\")\n pst.write_obs_summary_table(filename=\"obs.tex\")\n \"\"\"\n\n ffmt = lambda x: \"{0:5G}\".format(x)\n obs = self.observation_data.copy()\n obsgp = obs.groupby(obs.obgnme).groups\n cols = [\"obgnme\",\"obsval\",\"nzcount\",\"zcount\",\"weight\",\"stdev\",\"pe\"]\n\n labels = {\"obgnme\":\"group\",\"obsval\":\"value\",\"nzcount\":\"non-zero weight\",\n \"zcount\":\"zero weight\",\"weight\":\"weight\",\"stdev\":\"standard deviation\",\n \"pe\":\"percent error\"}\n\n obs.loc[:,\"stdev\"] = 1.0 / obs.weight\n obs.loc[:,\"pe\"] = 100.0 * (obs.stdev / obs.obsval.apply(np.abs))\n obs = obs.replace([np.inf,-np.inf],np.NaN)\n\n data = {c: [] for c in cols}\n for og, onames in obsgp.items():\n obs_g = obs.loc[onames, :]\n data[\"obgnme\"].append(og)\n data[\"nzcount\"].append(obs_g.loc[obs_g.weight > 0.0,:].shape[0])\n data[\"zcount\"].append(obs_g.loc[obs_g.weight == 0.0,:].shape[0])\n for col in cols:\n if col in [\"obgnme\",\"nzcount\",\"zcount\"]:\n continue\n\n #print(col)\n mn = obs_g.loc[:, col].min()\n mx = obs_g.loc[:, col].max()\n if np.isnan(mn) or np.isnan(mx):\n data[col].append(\"NA\")\n elif mn == mx:\n data[col].append(ffmt(mn))\n else:\n data[col].append(\"{0} to {1}\".format(ffmt(mn), ffmt(mx)))\n\n\n obsg_df = pd.DataFrame(data=data, index=list(obsgp.keys()))\n obsg_df = obsg_df.loc[:, cols]\n if group_names is not None:\n obsg_df.loc[:, \"obgnme\"] = obsg_df.obgnme.apply(lambda x: group_names.pop(x, x))\n obsg_df.sort_values(by=\"obgnme\",inplace=True,ascending=True)\n obsg_df.columns = obsg_df.columns.map(lambda x: labels[x])\n\n preamble = '\\\\documentclass{article}\\n\\\\usepackage{booktabs}\\n' + \\\n '\\\\usepackage{pdflscape}\\n\\\\usepackage{longtable}\\n' + \\\n '\\\\usepackage{booktabs}\\n\\\\usepackage{nopageno}\\n\\\\begin{document}\\n'\n\n\n if filename == \"none\":\n return obsg_df\n\n if filename is None:\n filename = self.filename.replace(\".pst\", \".obs.tex\")\n\n with open(filename, 'w') as f:\n\n f.write(preamble)\n\n f.write(\"\\\\begin{center}\\nObservation Summary\\n\\\\end{center}\\n\")\n f.write(\"\\\\begin{center}\\n\\\\begin{landscape}\\n\")\n f.write(\"\\\\setlength{\\\\LTleft}{-4.0cm}\\n\")\n obsg_df.to_latex(f, index=False, longtable=True)\n f.write(\"\\\\end{landscape}\\n\")\n f.write(\"\\\\end{center}\\n\")\n f.write(\"\\\\end{document}\\n\")\n\n return obsg_df\n\n\n # jwhite - 13 Aug 2019 - no one is using this write?\n # def run(self,exe_name=\"pestpp\",cwd=None):\n # \"\"\"run a command related to the pst instance. If\n # write() has been called, then the filename passed to write\n # is in the command, otherwise the original constructor\n # filename is used\n #\n # exe_name : str\n # the name of the executable to call. Default is \"pestpp\"\n # cwd : str\n # the directory to execute the command in. 
If None,\n # os.path.split(self.filename) is used to find\n # cwd. Default is None\n #\n #\n # \"\"\"\n # filename = self.filename\n # if self.new_filename is not None:\n # filename = self.new_filename\n # cmd_line = \"{0} {1}\".format(exe_name,os.path.split(filename)[-1])\n # if cwd is None:\n # cwd = os.path.join(*os.path.split(filename)[:-1])\n # if cwd == '':\n # cwd = '.'\n # print(\"executing {0} in dir {1}\".format(cmd_line, cwd))\n # pyemu.utils.os_utils.run(cmd_line,cwd=cwd)\n\n\n @staticmethod\n def _is_less_const(name):\n constraint_tags = [\"l_\", \"less\"]\n return True in [True for c in constraint_tags if name.startswith(c)]\n\n @property\n def less_than_obs_constraints(self):\n \"\"\"get the names of the observations that\n are listed as active less than inequality constraints.\n\n Returns:\n `pandas.Series`: names of obseravtions that are non-zero weighted less\n than constraints (`obgnme` starts with 'l\\_' or \"less\")\n\n Note:\n Zero-weighted obs are skipped\n\n \"\"\"\n\n\n obs = self.observation_data\n lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) \\\n and x.weight != 0.0,axis=1),\"obsnme\"]\n return lt_obs\n\n @property\n def less_than_pi_constraints(self):\n \"\"\"get the names of the prior information eqs that\n are listed as active less than inequality constraints.\n\n Returns:\n `pandas.Series`: names of prior information that are non-zero weighted\n less than constraints (`obgnme` starts with \"l\\_\" or \"less\")\n\n Note:\n Zero-weighted pi are skipped\n\n \"\"\"\n\n pi = self.prior_information\n lt_pi = pi.loc[pi.apply(lambda x: self._is_less_const(x.obgnme) \\\n and x.weight != 0.0, axis=1), \"pilbl\"]\n return lt_pi\n\n @staticmethod\n def _is_greater_const(name):\n constraint_tags = [\"g_\", \"greater\"]\n return True in [True for c in constraint_tags if name.startswith(c)]\n\n @property\n def greater_than_obs_constraints(self):\n \"\"\"get the names of the observations that\n are listed as active greater than inequality constraints.\n\n Returns:\n `pandas.Series`: names obseravtions that are non-zero weighted\n greater than constraints (`obgnme` startsiwth \"g\\_\" or \"greater\")\n\n Note:\n Zero-weighted obs are skipped\n\n \"\"\"\n\n\n\n obs = self.observation_data\n gt_obs = obs.loc[obs.apply(lambda x: self._is_greater_const(x.obgnme) \\\n and x.weight != 0.0,axis=1),\"obsnme\"]\n return gt_obs\n\n @property\n def greater_than_pi_constraints(self):\n \"\"\"get the names of the prior information eqs that\n are listed as active greater than inequality constraints.\n\n Returns:\n `pandas.Series` names of prior information that are non-zero weighted\n greater than constraints (`obgnme` startsiwth \"g\\_\" or \"greater\")\n\n\n Note:\n Zero-weighted pi are skipped\n\n \"\"\"\n\n pi = self.prior_information\n gt_pi = pi.loc[pi.apply(lambda x: self._is_greater_const(x.obgnme) \\\n and x.weight != 0.0, axis=1), \"pilbl\"]\n return gt_pi\n\n\n\n def get_par_change_limits(self):\n \"\"\" calculate the various parameter change limits used in pest.\n\n\n Returns:\n `pandas.DataFrame`: a copy of `Pst.parameter_data`\n with columns for relative and factor change limits\n Note:\n\n does not yet support absolute parameter change limits!\n\n Works in control file values space (not log transformed space). 
Also\n adds columns for effective upper and lower which account for par bounds and the\n value of parchglim\n\n \"\"\"\n par = self.parameter_data\n fpars = par.loc[par.parchglim==\"factor\",\"parnme\"]\n rpars = par.loc[par.parchglim == \"relative\", \"parnme\"]\n #apars = par.loc[par.parchglim == \"absolute\", \"parnme\"]\n\n change_df = par.copy()\n\n fpm = self.control_data.facparmax\n rpm = self.control_data.relparmax\n facorig = self.control_data.facorig\n base_vals = par.parval1.copy()\n\n # apply zero value correction\n base_vals[base_vals==0] = par.loc[base_vals==0,\"parubnd\"] / 4.0\n\n # apply facorig\n replace_pars = base_vals.index.map(lambda x: par.loc[x,\"partrans\"]!=\"log\" and np.abs(base_vals.loc[x]) < facorig*np.abs(base_vals.loc[x]))\n #print(facorig,replace_pars)\n base_vals.loc[replace_pars] = base_vals.loc[replace_pars] * facorig\n\n # negative fac pars\n nfpars = par.loc[base_vals.apply(lambda x: x < 0)].index\n change_df.loc[nfpars, \"fac_upper\"] = base_vals / fpm\n change_df.loc[nfpars, \"fac_lower\"] = base_vals * fpm\n\n # postive fac pars\n pfpars = par.loc[base_vals.apply(lambda x: x > 0)].index\n change_df.loc[pfpars, \"fac_upper\"] = base_vals * fpm\n change_df.loc[pfpars, \"fac_lower\"] = base_vals / fpm\n\n # relative\n\n rdelta = base_vals.apply(np.abs) * rpm\n change_df.loc[:,\"rel_upper\"] = base_vals + rdelta\n change_df.loc[:,\"rel_lower\"] = base_vals - rdelta\n\n change_df.loc[:,\"chg_upper\"] = np.NaN\n change_df.loc[fpars,\"chg_upper\"] = change_df.fac_upper[fpars]\n change_df.loc[rpars, \"chg_upper\"] = change_df.rel_upper[rpars]\n change_df.loc[:, \"chg_lower\"] = np.NaN\n change_df.loc[fpars, \"chg_lower\"] = change_df.fac_lower[fpars]\n change_df.loc[rpars, \"chg_lower\"] = change_df.rel_lower[rpars]\n\n # effective limits\n change_df.loc[:,\"eff_upper\"] = change_df.loc[:,[\"parubnd\",\"chg_upper\"]].min(axis=1)\n change_df.loc[:,\"eff_lower\"] = change_df.loc[:, [\"parlbnd\", \"chg_lower\"]].max(axis=1)\n\n return change_df\n\n def get_adj_pars_at_bounds(self, frac_tol=0.01):\n \"\"\"get list of adjustable parameter at/near bounds\n\n Args:\n frac_tol ('float`): fractional tolerance of distance to bound. For upper bound,\n the value `parubnd * (1-frac_tol)` is used, lower bound uses `parlbnd * (1.0 + frac_tol)`\n\n Returns:\n tuple containing:\n\n - **[`str`]**: list of parameters at/near lower bound\n - **[`str`]**: list of parameters at/near upper bound\n\n \"\"\"\n\n par = self.parameter_data.loc[self.adj_par_names,:].copy()\n over_ub = par.loc[par.apply(lambda x: x.parval1 >= (1.-frac_tol) * x.parubnd, axis=1),\"parnme\"].tolist()\n under_lb = par.loc[par.apply(lambda x: x.parval1 <= (1.+frac_tol) * x.parlbnd, axis=1),\"parnme\"].tolist()\n\n return under_lb,over_ub\n\n"
] |
[
[
"pandas.notnull",
"pandas.read_csv",
"numpy.abs",
"numpy.sqrt",
"pandas.isnull",
"numpy.isnan",
"pandas.DataFrame",
"numpy.log10",
"numpy.sum"
]
] |
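The record above stores pyemu's Pst control-file class. As a minimal, hedged sketch of how the methods shown in its own docstrings chain together (the file names my.pst / my.par, the observation group "hds", and the presence of a my.res residual file next to the control file are all assumptions for illustration; pyemu must be installed):

import pyemu

pst = pyemu.Pst("my.pst")                        # load an existing PEST control file
pst.parrep("my.par")                             # pull parval1 values from a PEST .par file
pst.adjust_weights(obsgrp_dict={"hds": 100.0})   # make group "hds" contribute 100.0 to phi (needs a residual file)
print(pst.phi)                                   # composite objective function, computed from the residuals
pst.control_data.noptmax = 0                     # one-off forward run, as in the from_io_files docstring
pst.write("my_new.pst", version=1)               # write a version-1 control file

This is only a usage sketch assembled from the docstring examples in the stored code, not a definitive pyemu workflow.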
keeeal/alpha-zero-ut3
|
[
"d6ff5e20b97cdd386ca27fa8be06334cc8afdb3d"
] |
[
"ut3/pytorch/UT3NNet.py"
] |
[
"\nimport torch\nfrom torch import nn\n\nclass UT3NNet(nn.Module):\n def __init__(self, game, args):\n self.size = game.getBoardSize()\n self.channels = game.getBoardChannels()\n self.actions = game.getActionSize()\n self.args = args\n\n super(UT3NNet, self).__init__()\n self.drop = nn.Dropout(self.args.dropout)\n self.norm = nn.BatchNorm1d(4*9*args.width)\n self.relu, self.tanh = nn.ReLU(), nn.Tanh()\n self.soft = nn.LogSoftmax(dim=1)\n\n self.conv0 = nn.Conv2d(self.channels, args.width, 3, 1, 1)\n\n self.conv1 = nn.Conv2d(self.channels + args.width, args.width,\n kernel_size=(3,3), stride=3)\n self.conv2 = nn.Conv2d(self.channels + args.width, 3*args.width,\n kernel_size=(3,9), stride=3)\n self.conv3 = nn.Conv2d(self.channels + args.width, 3*args.width,\n kernel_size=(9,3), stride=3)\n self.conv4 = nn.Conv2d(self.channels + args.width, 9*args.width,\n kernel_size=(9,9))\n\n self.out_pi = nn.Linear(4*9*args.width, self.actions)\n self.out_v = nn.Linear(4*9*args.width, 1)\n\n def forward(self, x):\n x = torch.cat((x, self.conv0(x)), dim=1)\n x1 = self.conv1(x).view(-1, 9*self.args.width)\n x2 = self.conv2(x).view(-1, 9*self.args.width)\n x3 = self.conv3(x).view(-1, 9*self.args.width)\n x4 = self.conv4(x).view(-1, 9*self.args.width)\n x = torch.cat((x1, x2, x3, x4), dim=1)\n x = self.relu(self.norm(x))\n\n pi = self.out_pi(x)\n v = self.out_v(x)\n\n return self.soft(pi), self.tanh(v)\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.LogSoftmax",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.ReLU"
]
] |
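The record above is a PyTorch policy/value network for ultimate tic-tac-toe. A hedged sketch of wiring it up follows; the DummyGame return values (3 board channels, 81 actions), args.width=64 and args.dropout=0.3 are assumptions for illustration, and the alpha-zero-ut3 repo is assumed to be importable so the stored module path resolves:

import torch
from types import SimpleNamespace
from ut3.pytorch.UT3NNet import UT3NNet       # module path as stored in the record

class DummyGame:
    # Hypothetical stand-in for the repo's game class; only the three accessors UT3NNet reads.
    def getBoardSize(self): return (9, 9)
    def getBoardChannels(self): return 3
    def getActionSize(self): return 81

args = SimpleNamespace(width=64, dropout=0.3)
net = UT3NNet(DummyGame(), args).eval()       # eval() so BatchNorm1d accepts a batch of one
board = torch.zeros(1, 3, 9, 9)               # (batch, channels, height, width)
log_pi, v = net(board)                        # log-policy over actions, tanh-squashed value
print(log_pi.shape, v.shape)                  # torch.Size([1, 81]) torch.Size([1, 1])

With a 9x9 board the four stride-3 convolutions each flatten to 9*width features, which is why the BatchNorm1d and output heads expect 4*9*width inputs.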
marcnunez/CabinMonitoringV1
|
[
"f95cf73afcd843f1a8a107517f96d4631a5d8726"
] |
[
"train_sppe/src/utils/img.py"
] |
[
"# -----------------------------------------------------\n# Copyright (c) Shanghai Jiao Tong University. All rights reserved.\n# Written by Jiefeng Li ([email protected])\n# -----------------------------------------------------\n\nimport numpy as np\nimport torch\nimport scipy.misc\nimport torch.nn.functional as F\nimport cv2\nfrom opt import opt\n\n\nRED = (0, 0, 255)\nGREEN = (0, 255, 0)\nBLUE = (255, 0, 0)\nCYAN = (255, 255, 0)\nYELLOW = (0, 255, 255)\nORANGE = (0, 165, 255)\nPURPLE = (255, 0, 255)\n\n\ndef im_to_torch(img):\n img = np.transpose(img, (2, 0, 1)) # C*H*W\n img = to_torch(img).float()\n if img.max() > 1:\n img /= 255\n return img\n\n\ndef torch_to_im(img):\n img = to_numpy(img)\n img = np.transpose(img, (1, 2, 0)) # C*H*W\n return img\n\n\ndef load_image(img_path):\n # H x W x C => C x H x W\n return im_to_torch(scipy.misc.imread(img_path, mode='RGB'))\n\n\ndef to_numpy(tensor):\n if torch.is_tensor(tensor):\n return tensor.cpu().numpy()\n elif type(tensor).__module__ != 'numpy':\n raise ValueError(\"Cannot convert {} to numpy array\"\n .format(type(tensor)))\n return tensor\n\n\ndef to_torch(ndarray):\n if type(ndarray).__module__ == 'numpy':\n return torch.from_numpy(ndarray)\n elif not torch.is_tensor(ndarray):\n raise ValueError(\"Cannot convert {} to torch tensor\"\n .format(type(ndarray)))\n return ndarray\n\n\ndef drawGaussian(img, pt, sigma):\n img = to_numpy(img)\n tmpSize = 3 * sigma\n # Check that any part of the gaussian is in-bounds\n ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]\n br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]\n\n if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or\n br[0] < 0 or br[1] < 0):\n # If not, just return the image as is\n return to_torch(img)\n\n # Generate gaussian\n size = 2 * tmpSize + 1\n x = np.arange(0, size, 1, float)\n y = x[:, np.newaxis]\n x0 = y0 = size // 2\n sigma = size / 4.0\n # The gaussian is not normalized, we want the center value to equal 1\n g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n\n # Usable gaussian range\n g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]\n g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]\n # Image range\n img_x = max(0, ul[0]), min(br[0], img.shape[1])\n img_y = max(0, ul[1]), min(br[1], img.shape[0])\n\n img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\n return to_torch(img)\n\n\ndef transformBox(pt, ul, br, inpH, inpW, resH, resW):\n center = torch.zeros(2)\n center[0] = (br[0] - 1 - ul[0]) / 2\n center[1] = (br[1] - 1 - ul[1]) / 2\n\n lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)\n lenW = lenH * inpW / inpH\n\n _pt = torch.zeros(2)\n _pt[0] = pt[0] - ul[0]\n _pt[1] = pt[1] - ul[1]\n # Move to center\n _pt[0] = _pt[0] + max(0, (lenW - 1) / 2 - center[0])\n _pt[1] = _pt[1] + max(0, (lenH - 1) / 2 - center[1])\n pt = (_pt * resH) / lenH\n pt[0] = round(float(pt[0]))\n pt[1] = round(float(pt[1]))\n return pt.int()\n\n\ndef transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):\n center = torch.zeros(2)\n center[0] = (br[0] - 1 - ul[0]) / 2\n center[1] = (br[1] - 1 - ul[1]) / 2\n\n lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)\n lenW = lenH * inpW / inpH\n\n _pt = (pt * lenH) / resH\n _pt[0] = _pt[0] - max(0, (lenW - 1) / 2 - center[0])\n _pt[1] = _pt[1] - max(0, (lenH - 1) / 2 - center[1])\n\n new_point = torch.zeros(2)\n new_point[0] = _pt[0] + ul[0]\n new_point[1] = _pt[1] + ul[1]\n return new_point\n\n\ndef cropBox(img, ul, br, resH, resW):\n ul = ul.int()\n br = (br - 1).int()\n # br = 
br.int()\n lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)\n lenW = lenH * resW / resH\n if img.dim() == 2:\n img = img[np.newaxis, :]\n\n box_shape = [br[1] - ul[1], br[0] - ul[0]]\n pad_size = [float((lenH - box_shape[0])) // 2, float((lenW - box_shape[1])) // 2]\n # Padding Zeros\n img[:, :ul[1], :] = 0\n img[:, :, :ul[0]] = 0\n img[:, br[1] + 1:, :], img[:, :, br[0] + 1:] = 0, 0\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n\n src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)\n src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)\n dst[0, :] = 0\n dst[1, :] = np.array([resW - 1, resH - 1], np.float32)\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n dst_img = cv2.warpAffine(torch_to_im(img), trans,\n (resW, resH), flags=cv2.INTER_LINEAR)\n\n return im_to_torch(torch.Tensor(dst_img))\n\n\ndef cv_rotate(img, rot, resW, resH):\n\n center = np.array((resW - 1, resH - 1)) / 2\n rot_rad = np.pi * rot / 180\n\n src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)\n dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n\n src[0, :] = center\n src[1, :] = center + src_dir\n dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]\n dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n dst_img = cv2.warpAffine(torch_to_im(img), trans,\n (resW, resH), flags=cv2.INTER_LINEAR)\n\n return im_to_torch(torch.Tensor(dst_img))\n\n\ndef flip(x):\n assert (x.dim() == 3 or x.dim() == 4)\n if '0.4.1' in torch.__version__:\n dim = x.dim() - 1\n\n return x.flip(dims=(dim,))\n else:\n is_cuda = False\n if x.is_cuda:\n x = x.cpu()\n is_cuda = True\n x = x.numpy().copy()\n if x.ndim == 3:\n x = np.transpose(np.fliplr(np.transpose(x, (0, 2, 1))), (0, 2, 1))\n elif x.ndim == 4:\n for i in range(x.shape[0]):\n x[i] = np.transpose(\n np.fliplr(np.transpose(x[i], (0, 2, 1))), (0, 2, 1))\n x = torch.from_numpy(x.copy())\n if is_cuda:\n x = x.cuda()\n return x\n\n\ndef shuffleLR(x, dataset):\n flipRef = dataset.flipRef\n assert (x.dim() == 3 or x.dim() == 4)\n for pair in flipRef:\n dim0, dim1 = pair\n dim0 -= 1\n dim1 -= 1\n if x.dim() == 4:\n tmp = x[:, dim1].clone()\n x[:, dim1] = x[:, dim0].clone()\n x[:, dim0] = tmp.clone()\n #x[:, dim0], x[:, dim1] = deepcopy((x[:, dim1], x[:, dim0]))\n else:\n tmp = x[dim1].clone()\n x[dim1] = x[dim0].clone()\n x[dim0] = tmp.clone()\n #x[dim0], x[dim1] = deepcopy((x[dim1], x[dim0]))\n return x\n\n\ndef vis_frame(frame, im_res, format='coco'):\n '''\n frame: frame image\n im_res: im_res of predictions\n format: coco or mpii\n\n return rendered image\n '''\n if format == 'coco':\n l_pair = [\n (0, 1), (0, 2), (1, 3), (2, 4), # Head\n (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),\n (5, 11), (6, 12), # Body\n (11, 13), (12, 14), (13, 15), (14, 16)\n ]\n p_color = [RED, RED, RED, RED, RED, YELLOW, YELLOW, YELLOW,\n YELLOW, YELLOW, YELLOW, GREEN, GREEN, GREEN, GREEN, GREEN, GREEN]\n line_color = [YELLOW, YELLOW, YELLOW, YELLOW, BLUE, BLUE,\n BLUE, BLUE, BLUE, PURPLE, PURPLE, RED, RED, RED, RED]\n elif format == 'mpii':\n l_pair = [\n (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),\n 
(13, 14), (14, 15), (3, 4), (4, 5),\n (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)\n ]\n p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,\n RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]\n line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,\n RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]\n else:\n raise NotImplementedError\n\n im_name = im_res['imgname'].split('/')[-1]\n img = frame.copy()\n for human in im_res['result']:\n part_line = {}\n kp_preds = human['keypoints']\n kp_scores = human['kp_score']\n # Draw keypoints\n for n in range(kp_scores.shape[0]):\n if kp_scores[n] <= 0.15:\n continue\n cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])\n part_line[n] = (cor_x, cor_y)\n cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)\n # Now create a mask of logo and create its inverse mask also\n #transparency = max(0, min(1, kp_scores[n]))\n #img = cv2.addWeighted(bg, transparency, img, 1, 0)\n # Draw limbs\n for i, (start_p, end_p) in enumerate(l_pair):\n if start_p in part_line and end_p in part_line:\n start_xy = part_line[start_p]\n end_xy = part_line[end_p]\n cv2.line(img, start_xy, end_xy,\n line_color[i], (0.5 * (kp_scores[start_p] + kp_scores[end_p])) + 1)\n #transparency = max(\n # 0, min(1, (kp_scores[start_p] + kp_scores[end_p])))\n #img = cv2.addWeighted(bg, transparency, img, 1, 0)\n return img\n\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n"
] |
[
[
"torch.Tensor",
"torch.zeros",
"numpy.arange",
"torch.is_tensor",
"torch.from_numpy",
"numpy.sin",
"numpy.cos",
"numpy.float32",
"numpy.transpose",
"numpy.array",
"numpy.exp",
"numpy.zeros"
]
] |
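The record above (a pose-estimation utility module) builds its keypoint heatmaps from an unnormalized Gaussian whose window is `2 * (3 * sigma) + 1` pixels wide and whose effective spread is `size / 4`. A minimal, self-contained sketch of that kernel computation, using only `numpy` calls already listed for the record; the `sigma_in` name is introduced here purely for illustration:

```python
# Sketch of the unnormalized Gaussian kernel that drawGaussian (in the record
# above) pastes into the target heatmap; sigma_in is a hypothetical input value.
import numpy as np

sigma_in = 1
tmp_size = 3 * sigma_in
size = 2 * tmp_size + 1            # 7x7 window for sigma_in = 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
sigma = size / 4.0                 # the code overrides the input sigma here
g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
print(g.shape, g.max())            # (7, 7), centre value exactly 1.0
```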
keke185321/combine-copy-
|
[
"de2eba77d8db5c9c1908aac1262590b80c2348ce"
] |
[
"tktest1.py"
] |
[
"#---------Imports\nfrom numpy import arange, sin, pi\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\nimport Tkinter as Tk\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n#---------End of imports\n\nfig = plt.Figure()\n\nx = np.arange(0, 2*np.pi, 0.01) # x-array\n\ndef animate(i):\n line.set_ydata(np.sin(x+i/10.0)) # update the data\n return line,\n\nroot = Tk.Tk()\n\nlabel = Tk.Label(root,text=\"SHM Simulation\").grid(column=0, row=0)\n\ncanvas = FigureCanvasTkAgg(fig, master=root)\ncanvas.get_tk_widget().grid(column=0,row=1)\n\nax = fig.add_subplot(111)\nline, = ax.plot(x, np.sin(x))\nani = animation.FuncAnimation(fig, animate, np.arange(1, 200), interval=25, blit=False)\n\nTk.mainloop()\n"
] |
[
[
"numpy.arange",
"numpy.sin",
"matplotlib.pyplot.Figure",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
]
] |
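The record above embeds a matplotlib `FigureCanvasTkAgg` animation in a window using the Python 2 module name `Tkinter`. A hedged Python 3 rendering of the same SHM animation, assuming matplotlib with a working TkAgg display is available (module renamed to `tkinter`, figure built from `matplotlib.figure.Figure`):

```python
# Python 3 sketch of the embedded-animation pattern from the record above.
import tkinter as Tk
import numpy as np
import matplotlib.animation as animation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure

fig = Figure()
ax = fig.add_subplot(111)
x = np.arange(0, 2 * np.pi, 0.01)          # x-array
line, = ax.plot(x, np.sin(x))

def animate(i):
    line.set_ydata(np.sin(x + i / 10.0))   # shift the sine wave each frame
    return line,

root = Tk.Tk()
Tk.Label(root, text="SHM Simulation").grid(column=0, row=0)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.get_tk_widget().grid(column=0, row=1)
ani = animation.FuncAnimation(fig, animate, np.arange(1, 200), interval=25, blit=False)
Tk.mainloop()
```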
ltalirz/asetk
|
[
"bdb31934a5eb49d601e492fc98078d27f5dd2ebd"
] |
[
"asetk/format/igor.py"
] |
[
"\"\"\"Classes for use with IGOR Pro\n\n\"\"\"\n\nimport re\nimport numpy as np\nfrom . import cube\n\nclass Axis(object):\n \"\"\"Represents an axis of an IGOR wave\"\"\"\n\n def __init__(self, symbol, min, delta, unit, wavename=None):\n self.symbol = symbol\n self.min = min\n self.delta = delta\n self.unit = unit\n self.wavename = wavename\n\n def __str__(self):\n \"\"\"Prints axis in itx format\n\n Note: SetScale/P expects minimum value and step-size\n \"\"\"\n delta = 0 if self.delta is None else self.delta\n s = \"X SetScale/P {symb} {min},{delta}, \\\"{unit}\\\", {name};\\n\"\\\n .format(symb=self.symbol, min=self.min, delta=delta,\\\n unit=self.unit, name=self.wavename)\n return s\n\n def read(self, string):\n \"\"\"Read axis from string\n\n Format: \n X SetScale/P x 0,2.01342281879195e-11,\"m\", data_00381_Up;\n SetScale d 0,0,\"V\", data_00381_Up\n \"\"\"\n match = re.search(\"SetScale/?P? (.) ([+-\\.\\de]+),([+-\\.\\de]+),\\\"(\\w+)\\\",\\s*(\\w+)\", string)\n self.symbol = match.group(1)\n self.min = float(match.group(2))\n self.delta = float(match.group(3))\n self.unit = match.group(4)\n self.wavename = match.group(5)\n\n\n\nclass Wave(object):\n \"\"\"A class template for IGOR waves of generic dimension\"\"\"\n\n def __init__(self, data, axes, name=None):\n \"\"\"Initialize IGOR wave of generic dimension\"\"\"\n self.data = data\n self.name = \"PYTHON_IMPORT\" if name is None else name\n self.axes = axes\n\n def __str__(self):\n \"\"\"Print IGOR wave\"\"\"\n s = \"\"\n s += \"IGOR\\n\"\n\n dimstring = \"(\"\n for i in range(len(self.data.shape)):\n dimstring += \"{}, \".format(self.data.shape[i])\n dimstring = dimstring[:-2] + \")\" \n\n s += \"WAVES/N={} {}\\n\".format(dimstring, self.name)\n s += \"BEGIN\\n\"\n s += self.print_data()\n s += \"END\\n\"\n for ax in self.axes:\n s += str(ax)\n return s\n\n def read(self, fname):\n \"\"\"Read IGOR wave\n \n Should work for any dimension.\n Tested so far only for 2d wave.\n \"\"\"\n f=open(fname, 'r')\n content=f.read()\n f.close()\n\n lines = content.split(\"\\r\")\n\n line = lines.pop(0)\n if not line == \"IGOR\":\n raise IOError(\"Files does not begin with 'IGOR'\")\n\n line = lines.pop(0)\n while not re.match(\"WAVES\",line):\n line = lines.pop(0)\n match = re.search(\"WAVES/N=\\(([\\d,]+)\\)\\s+(.+)\",line)\n grid = match.group(1).split(',')\n grid = np.array(grid, dtype=int)\n self.name = match.group(2)\n\n line = lines.pop(0)\n if not line == \"BEGIN\":\n raise IOError(\"Missing 'BEGIN' statement of data block\")\n\n # read data\n datastring = \"\"\n line = lines.pop(0)\n while not re.match(\"END\",line):\n datastring += line\n line = lines.pop(0)\n data = np.array(datastring.split(), dtype=float)\n self.data = data.reshape(grid)\n\n # read axes\n line = lines.pop(0)\n matches = re.findall(\"SetScale.+?(?:;|$)\", line)\n self.axes = []\n for match in matches:\n ax = Axis(None,None,None,None)\n ax.read(match)\n self.axes.append(ax)\n\n # the rest is discarded...\n #line = lines.pop(0)\n #print(line)\n\n @property\n def extent(self):\n \"\"\"Returns extent for plotting\"\"\"\n grid = self.data.shape\n extent = []\n for i in range(len(grid)):\n ax = self.axes[i]\n extent.append(ax.min)\n extent.append(ax.min+ax.delta*grid[i])\n\n return np.array(extent)\n\n\n def print_data(self):\n \"\"\"Determines how to print the data block.\n \n To be implemented by subclasses.\"\"\"\n\n def write(self, fname):\n f=open(fname, 'w')\n f.write(str(self))\n f.close()\n\n\nclass Wave1d(Wave):\n \"\"\"1d Igor wave\"\"\"\n \n default_parameters 
= dict(\n xmin = 0.0,\n xdelta = None,\n xlabel = 'x',\n ylabel = 'y',\n )\n\n def __init__(self, data=None, axes=None, name=\"1d\", **kwargs):\n \"\"\"Initialize 1d IGOR wave\"\"\"\n super(Wave1d, self).__init__(data, axes, name) \n\n self.parameters = self.default_parameters\n for key, value in kwargs.items():\n if key in self.parameters:\n self.parameters[key] = value\n else:\n raise KeyError(\"Unknown parameter {}\".format(key))\n\n if axes is None:\n p=self.parameters\n x = Axis(symbol='x', min=p['xmin'], delta=p['xdelta'], unit=p['xlabel'],\n wavename=self.name)\n self.axes = [x]\n\n def print_data(self):\n s = \"\"\n for line in self.data:\n s += \"{:12.6e}\\n\".format(float(line))\n\n return s\n \n\n\nclass Wave2d(Wave):\n \"\"\"2d Igor wave\"\"\"\n\n default_parameters = dict(\n xmin = 0.0,\n xdelta = None,\n xmax = None,\n xlabel = 'x',\n ymin = 0.0,\n ydelta = None,\n ymax = None,\n ylabel = 'y',\n )\n \n def __init__(self, data=None, axes=None, name=None, **kwargs):\n \"\"\"Initialize 2d Igor wave\n\n Parameters\n ----------\n \n * data \n * name \n * xmin, xdelta, xlabel \n * ymin, ydelta, ylabel \n \"\"\"\n super(Wave2d, self).__init__(data, axes=axes, name=name)\n\n self.parameters = self.default_parameters\n for key, value in kwargs.items():\n if key in self.parameters:\n self.parameters[key] = value\n else:\n raise KeyError(\"Unknown parameter {}\".format(key))\n\n if axes is None:\n p=self.parameters\n\n nx, ny = self.data.shape\n if p['xmax'] is None:\n p['xmax'] = p['xdelta'] * nx\n elif p['xdelta'] is None:\n p['xdelta'] = p['xmax'] / nx\n\n if p['ymax'] is None:\n p['ymax'] = p['ydelta'] * ny\n elif p['ydelta'] is None:\n p['ydelta'] = p['ymax'] / ny\n\n x = Axis(symbol='x', min=p['xmin'], delta=p['xdelta'], \n unit=p['xlabel'], wavename=self.name)\n y = Axis(symbol='y', min=p['ymin'], delta=p['ydelta'], \n unit=p['ylabel'], wavename=self.name)\n self.axes = [x,y]\n\n\n def print_data(self):\n \"\"\"Determines how to print the data block\"\"\"\n s = \"\"\n for line in self.data:\n for x in line:\n s += \"{:12.6e} \".format(x)\n s += \"\\n\"\n\n return s\n \n\n @classmethod\n def from_cube(cls, cube, dir, index, fname):\n \"\"\"Creates 2d Igor Wave from Gaussian Cube file\n \n Parameters\n ----------\n * cube : format.cube object containing cube file\n * dir : 'x', 'y' or 'z'\n * index: index of plane to be taken\n \"\"\"\n return NotImplementedError(\"Not yet implemented\")\n #tmp = Wave3d()\n #tmp.read_from_cube(fname)\n #return tmp\n\n def read_from_cube(self, cube, dir, index, fname=None):\n # To be implemented\n dir\n #super(Wave2d, self).__init__(\n # data=cube.get_plane(dir, index),\n # name=name,\n # axes=)\n # comment=comment,\n # t,origin,atoms,data)\n \n\n\nclass Wave3d(Wave):\n \"\"\"3d Igor wave intended for cube files (untested)\"\"\"\n\n @classmethod\n def from_cube_file(cls, fname):\n \"\"\"Creates 3d Igor Wave from Gaussian Cube file\"\"\"\n tmp = Wave3d()\n tmp.read_from_cube(fname)\n return tmp\n\n\n def copy(self, spectrum):\n \"\"\"Performs deep copy of spectrum.\"\"\"\n self.energylevels = [ el.copy() for el in spectrum.energylevels ]\n self.spins = cp.copy(spectrum.spins)\n\n def read_from_cube(self, fname):\n \"\"\"Reads 3d Igor Wave from Gaussian Cube file\"\"\"\n c = cube.from_file(fname, read_data=True)\n\n self.data = c.data\n self.name = c.title\n\n axes = []\n axes.append(Axis(\n symbol='x',\n min=c.origin[0],\n delta=c.dx,\n label=\"x [Bohr]\",\n name=self.name)\n )\n axes.append(Axis(\n symbol='y',\n min=c.origin[1],\n delta=c.dy,\n 
label=\"y [Bohr]\",\n name=self.name)\n )\n axes.append(Axis(\n symbol='z',\n min=c.origin[2],\n delta=c.dz,\n label=\"z [Bohr]\",\n name=self.name)\n )\n axes.append(Axis(\n symbol='d',\n min=np.min(d.data),\n delta=0,\n label=\"data [Unknown]\",\n name=self.name)\n )\n self.axes = axes\n\n\n\n#class WfnCube(cube.Cube):\n# \"\"\"Gaussian cube file written by CP2K\n#\n# CP2K writes the index of level and spin into the\n# comment line of the cube file\n# \"\"\"\n#\n# def __init__(self, title=None, comment=None, origin=None, atoms=None, \n# data=None, spin=None, wfn=None, energy=None, occupation=None):\n# \"\"\"Standard constructor, all parameters default to None.\n# \n# energy and occupation are not stored in the cube file,\n# but can be assigned by linking the cube file with the \n# output from the calculation.\n# \"\"\"\n# super(WfnCube, self).__init__(title,comment,origin,atoms,data)\n# self.spin = spin\n# self.wfn = wfn\n# self.energy = energy\n# self.occupation = occupation\n#\n# @classmethod\n# def from_file(cls, fname, read_data=False):\n# \"\"\"Creates Cube from cube file\"\"\"\n# tmp = WfnCube()\n# tmp.read_cube_file(fname, read_data=read_data)\n# return tmp\n#\n# def read_cube_file(self, fname, read_data=False, v=1):\n# \"\"\"Reads header and/or data of cube file\"\"\"\n# super(WfnCube, self).read_cube_file(fname, read_data, v)\n#\n# # CP2K stores information on the level/spin index\n# # in the comment line\n# commentregex = 'WAVEFUNCTION\\s+(\\d+)\\s+spin\\s+(\\d+)'\n# match = re.search(commentregex, self.comment)\n# self.wfn = int(match.group(1))\n# self.spin = int(match.group(2))\n"
] |
[
[
"numpy.array",
"numpy.min"
]
] |
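The `Axis.read` method in the record above recovers the symbol, minimum, step size, unit, and wave name of an IGOR `SetScale/P` line with a single regular expression. The same parse, run standalone on the example line given in that method's docstring:

```python
# Standalone re-run of the SetScale parsing regex from Axis.read, applied to
# the sample line from its docstring.
import re

line = 'X SetScale/P x 0,2.01342281879195e-11,"m", data_00381_Up;'
m = re.search(r'SetScale/?P? (.) ([+-\.\de]+),([+-\.\de]+),"(\w+)",\s*(\w+)', line)
symbol = m.group(1)                # 'x'
axis_min = float(m.group(2))       # 0.0
delta = float(m.group(3))          # 2.01342281879195e-11
unit = m.group(4)                  # 'm'
wavename = m.group(5)              # 'data_00381_Up'
print(symbol, axis_min, delta, unit, wavename)
```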
ChristinaB/Topnet-WM
|
[
"abfed11a3792a43ad23000cbf3b2cb9161f19218"
] |
[
"Preprocessing/MODFLOW_to_TopNet.py"
] |
[
"import numpy as np\nimport os\n\nMOD_indx = {2:0, 3:1, 4:2, 5:3, 6:4, 7:5, 8:6, 9:7, 10:8, 11:9,\n 12:10, 13:11, 14:12, 15:13, 16:14, 17:15, 18:16, 19:17, 20:18, 21:19,\n 22:20, 23:21, 24:22, 25:23, 26:24, 27:25, 28:26, 29:27, 30:28, 31:29,\n 32:30, 33:31, 34:32, 35:33, 36:34, 38:35}\nTop_to_Mod = {103:2, 115:3, 123:4, 124:5, 117:6, 92:7, 125:8, 122:9, 86:10, 91:11,\n 107:12, 118:13, 128:14, 84:15, 121:16, 95:17, 81:18, 104:19, 93:20, 130:21,\n 3:22, 83:23, 106:24, 108:25, 126:26, 129:27, 89:28, 85:29, 120:30, 94:31,\n 80:32, 82:33, 105:34, 116:35, 90:36, 88:38}\n\nMOD_ID = [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,\n 32, 33, 34, 35, 36, 38]\nMOD_ID = np.asarray(MOD_ID)\n\nTop_ID = [103, 115, 123, 124, 117, 92, 125, 122, 86, 91,\n 107, 118, 128, 84, 121, 95, 81, 104, 93, 130,\n 3, 83, 106, 108, 126, 129, 89, 85, 120, 94,\n 80, 82, 105, 116, 90, 88]\nTop_ID = np.asarray(Top_ID)\n\nnames = { 2:\"Blaine\", 3:\"Breckenridge\", 4:\"California\", 5:\"Cherry Point\",\n 6:\"Dale\", 7:\"Deer\", 8:\"Fazon\", 9:\"Fingalson\",\n 10:\"Fishtrap\", 11:\"Fourmile\", 12:\"Haynie\", 13:\"Johnson\",\n 14:\"Jordan\", 15:\"Kamm\", 16:\"Lake Terrell\", 17:\"Little Campbell\",\n 18:\"Lower Anderson\", 19:\"Lower Dakota\", 20:\"Lummi Peninsula West\", 21:\"Lummi River Delta\",\n 22:\"Nooksack Deming to Everson\", 23:\"North Fork Anderson\", 24:\"North Fork Dakota\", 25:\"Saar\",\n 26:\"Sandy Point\", 27:\"Schell\", 28:\"Schneider\", 29:\"Scott\",\n 30:\"Semiahmoo\", 31:\"Silver\", 32:\"Smith\", 33:\"South Fork Anderson\",\n 34:\"South Fork Dakota\", 35:\"Swift\", 36:\"Ten Mile\", 38:\"Wiser Lake/Cougar Creek\"}\n\n# ----------------------------------\ntopinpFile = open('topinpBertrand.dat','r')\nlines = topinpFile.readlines()\ntopinpFile.close()\nnstepsBertrand = int(lines[3].split()[0])\n\ntopinpFile = open('modelspcBertrand.dat','r')\nlines = topinpFile.readlines()\ntopinpFile.close()\nnSubBertrand = int(lines[2].split()[1])\n# ----------------------------------\ntopinpFile = open('topinpWRIA1.dat','r')\nlines = topinpFile.readlines()\ntopinpFile.close()\nnstepsLENS = int(lines[3].split()[0]) # WRIA1 and LENS are are the same\n\ntopinpFile = open('modelspcWRIA1.dat','r')\nlines = topinpFile.readlines()\ntopinpFile.close()\nnSubWRIA1 = int(lines[2].split()[1])\nnSubLENS = 36\n# ----------------------------------\nprint('Reading MODFLOW output.')\nmodflowFile = open('Drainage_DTW_censored.dat','r')\nDTW_SS_M = np.zeros(nSubLENS+nSubBertrand, dtype='d')\nDTW_IRR_M = np.zeros(nSubLENS+nSubBertrand, dtype='d')\nDTW_NIRR_M = np.zeros(nSubLENS+nSubBertrand, dtype='d')\nFlags = np.full(nSubWRIA1, -1, dtype='i')\nline = modflowFile.readline() # discard the header\nfor jsub in range(nSubLENS):\n line = modflowFile.readline()\n splitLine = line.split()\n modID = int(splitLine[0])\n if modID == 37: # skip ahead\n line = modflowFile.readline()\n splitLine = line.split()\n modID = int(splitLine[0])\n DTW_SS_M[jsub] = float(splitLine[-3])*0.3048\n DTW_IRR_M[jsub] = float(splitLine[-2])*0.3048\n DTW_NIRR_M[jsub] = float(splitLine[-1])*0.3048\n Flags[Top_ID[jsub]-1] = Top_ID[jsub]\n #print('jsub {0:3d} topID {1:3d} modID {2:3d} {3:27s} DTW_SS{4:8.4f} DTW_IRR{5:8.4f} DTW_NONIRR{6:8.4f}'.format(\n #jsub, topID, modID, names[modID], DTW_SS_M[jsub], DTW_IRR_M[jsub], DTW_NIRR_M[jsub]))\nprint('Reading WRIA1 depths to water, {0:d} drainages'.format(nSubWRIA1))\nzbar = np.zeros((nstepsLENS+1, nSubWRIA1), dtype='d')\nzFile = open('zbar.dat','r')\nlines = 
zFile.readlines()\nfor line in lines:\n splitLine = line.split()\n nt = int(splitLine[0])\n jsub = int(splitLine[1])-1\n zbar[nt,jsub] = float(splitLine[2]) # The rest are the same.\n\n# A relevant TopNet comment about the six regions in each subbasin:\n# The one line below is a substantive change DGT 10/13/12. It amounts to an assumption of\n# lumped depth to water table, rather than separate depth to water table for each drainage and\n# irrigation component. It was made to fix the issue of groundwater levels declining indefinitely\n# due to there being no recharge from artificially drained areas\n\n# --------------------- starting with LENS ---------------------------------\nprint('Writing LENS depth-to-water files, {0:d} drainages'.format(nSubWRIA1))\nif not os.access('LENS', os.F_OK):\n os.mkdir('LENS')\nos.chdir('LENS')\nzbar_ssFile = open('zbar_ss.dat','w')\nzbar_irrFile = open('zbar_irr.dat','w')\nzbar_nirrFile = open('zbar_nirr.dat','w')\nistep = 365 # a placeholder\nfor jsub in range(nSubWRIA1):\n if Flags[jsub] > -1:\n mod_ID = Top_to_Mod[Flags[jsub]]\n mod_indx = MOD_indx[mod_ID]\n #print('{0:4d} {1:26s} {2:5.1f} {3:5.1f} {4:5.1f}'.format(mod_indx, names[mod_ID],\n #DTW_SS_M[mod_indx]/0.3048, DTW_IRR_M[mod_indx]/0.3048, DTW_NIRR_M[mod_indx]/0.3048))\n zbar_ssFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, DTW_SS_M[mod_indx]))\n zbar_irrFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, DTW_IRR_M[mod_indx]))\n zbar_nirrFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, DTW_NIRR_M[mod_indx]))\n else:\n zbar_ssFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, zbar[istep,jsub]))\n zbar_irrFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, zbar[istep,jsub]))\n zbar_nirrFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, zbar[istep,jsub]))\nzbar_ssFile.close()\nzbar_irrFile.close()\nzbar_nirrFile.close()\n\n# ----------------- now Bertrand ---------------------------------------------\nprint('Reading Bertrand depth-to-water files, {0:d} drainages'.format(nSubBertrand))\nos.chdir('..')\nif not os.access('Bertrand', os.F_OK):\n os.mkdir('Bertrand')\nos.chdir('Bertrand')\nfor jsub in range(nSubLENS,nSubLENS+nSubBertrand):\n line = modflowFile.readline()\n splitLine = line.split()\n modID = int(splitLine[0])\n topID = int(splitLine[0][2:])\n DTW_SS_M[jsub] = float(splitLine[-3])*0.3048\n DTW_IRR_M[jsub] = float(splitLine[-2])*0.3048\n DTW_NIRR_M[jsub] = float(splitLine[-1])*0.3048\n #print('{0:3d} {1:4d} BertrandSub-{2:d} DTW_SS{3:8.4f} DTW_IRR{4:8.4f} DTW_NONIRR{5:8.4f}'.format(topID,\n #modID, topID, DTW_SS_M[jsub], DTW_IRR_M[jsub], DTW_NIRR_M[jsub]))\nprint('Writing Bertrand depth-to-water files.')\nzbar_ssFile = open('zbar_ss.dat','w')\nzbar_irrFile = open('zbar_irr.dat','w')\nzbar_nirrFile = open('zbar_nirr.dat','w')\nfor j in range(nSubLENS,nSubLENS+nSubBertrand):\n jsub = j - nSubLENS\n print('{0:4d} {1:5.1f} {2:5.1f} {3:5.1f}'.format(jsub,\n DTW_SS_M[j]/0.3048, DTW_IRR_M[j]/0.3048, DTW_NIRR_M[j]/0.3048))\n zbar_ssFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, DTW_SS_M[j]))\n zbar_irrFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, DTW_IRR_M[j]))\n zbar_nirrFile.write('{0:4d} {1:17.9f}\\n'.format(jsub, DTW_NIRR_M[j]))\nzbar_ssFile.close()\nzbar_irrFile.close()\nzbar_nirrFile.close()\n"
] |
[
[
"numpy.asarray",
"numpy.zeros",
"numpy.full"
]
] |
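The script in the record above maps each TopNet drainage ID to a MODFLOW drainage ID (`Top_to_Mod`), maps that to a row index in the MODFLOW output (`MOD_indx`), and converts the depth-to-water columns from feet to metres with the factor 0.3048. A small excerpt of that lookup chain, using only the first few entries of the dictionaries shown above; `dtw_ft` is a hypothetical value introduced for illustration:

```python
# Excerpt of the TopNet -> MODFLOW lookup chain and the ft -> m conversion
# applied before writing the zbar_ss / zbar_irr / zbar_nirr files.
Top_to_Mod = {103: 2, 115: 3, 123: 4}   # first entries of the full mapping
MOD_indx = {2: 0, 3: 1, 4: 2}           # MODFLOW ID -> output row index

top_id = 115
mod_id = Top_to_Mod[top_id]             # 3
row = MOD_indx[mod_id]                  # 1
dtw_ft = 12.5                           # hypothetical depth to water, feet
dtw_m = dtw_ft * 0.3048                 # 3.81 m, as for DTW_SS_M and friends
print(mod_id, row, round(dtw_m, 4))
```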
travbid/tensorflow
|
[
"a53365719e445edc5b48f0877f1d85b8d5837384"
] |
[
"tensorflow/python/keras/optimizer_v2/optimizer_v2.py"
] |
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Version 2 of class Optimizer.\"\"\"\n# pylint: disable=g-bad-name\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport functools\n\nimport six\n\nfrom tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.distribute import reduce_util as ds_reduce_util\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import revived_types\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import keras_export\n\n\ndef _deduplicate_indexed_slices(values, indices):\n \"\"\"Sums `values` associated with any non-unique `indices`.\n\n Args:\n values: A `Tensor` with rank >= 1.\n indices: A one-dimensional integer `Tensor`, indexing into the first\n dimension of `values` (as in an IndexedSlices object).\n\n Returns:\n A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a\n de-duplicated version of `indices` and `summed_values` contains the sum of\n `values` slices associated with each unique index.\n \"\"\"\n unique_indices, new_index_positions = array_ops.unique(indices)\n summed_values = math_ops.unsorted_segment_sum(\n values, new_index_positions,\n array_ops.shape(unique_indices)[0])\n return (summed_values, unique_indices)\n\n\[email protected]_metaclass(abc.ABCMeta)\n@keras_export(\"keras.optimizers.Optimizer\")\nclass OptimizerV2(trackable.Trackable):\n \"\"\"Updated base class for optimizers.\n\n This class defines the API to add Ops to train a model. 
You never use this\n class directly, but instead instantiate one of its subclasses such as\n `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`.\n\n ### Usage\n\n ```python\n # Create an optimizer with the desired parameters.\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n # `loss` is a callable that takes no argument and returns the value\n # to minimize.\n loss = lambda: 3 * var1 * var1 + 2 * var2 * var2\n # In graph mode, returns op that minimizes the loss by updating the listed\n # variables.\n opt_op = opt.minimize(loss, var_list=[var1, var2])\n opt_op.run()\n # In eager mode, simply call minimize to update the list of variables.\n opt.minimize(loss, var_list=[var1, var2])\n ```\n\n ### Custom training loop with Keras models\n\n In Keras models, sometimes variables are created when the model is first\n called, instead of construction time. Examples include 1) sequential models\n without input shape pre-defined, or 2) subclassed models. Pass var_list as\n callable in these cases.\n\n Example:\n ```python\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))\n model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid')\n loss_fn = lambda: tf.keras.losses.mse(model(input), output)\n var_list_fn = lambda: model.trainable_weights\n for input, output in data:\n opt.minimize(loss_fn, var_list_fn)\n ```\n\n ### Processing gradients before applying them.\n\n Calling `minimize()` takes care of both computing the gradients and\n applying them to the variables. If you want to process the gradients\n before applying them you can instead use the optimizer in three steps:\n\n 1. Compute the gradients with `tf.GradientTape`.\n 2. Process the gradients as you wish.\n 3. Apply the processed gradients with `apply_gradients()`.\n\n Example:\n\n ```python\n # Create an optimizer.\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n\n # Compute the gradients for a list of variables.\n with tf.GradientTape() as tape:\n loss = <call_loss_function>\n vars = <list_of_variables>\n grads = tape.gradient(loss, vars)\n processed_grads = [process_gradient(g) for g in grads]\n grads_and_vars = zip(processed_grads, var_list)\n\n # grads_and_vars is a list of tuples (gradient, variable). Do whatever you\n # need to the 'gradient' part, for example cap them, etc.\n capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]\n\n # Ask the optimizer to apply the capped gradients.\n opt.apply_gradients(capped_grads_and_vars)\n ```\n\n ### Use with `tf.distribute.Strategy`.\n\n This optimizer class is `tf.distribute.Strategy` aware, which means it\n automatically sums gradients across all replicas. To average gradients,\n you divide your loss by the global batch size, which is done\n automatically if you use `tf.keras` built-in training or evaluation loops.\n See the `reduction` argument of your loss which should be set to\n `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or\n `tf.keras.losses.Reduction.SUM` for not.\n\n If you are not using these and you want to average gradients, you should use\n `tf.math.reduce_sum` to add up your per-example losses and then divide by the\n global batch size. Note that when using `tf.distribute.Strategy`, the first\n component of a tensor's shape is the *replica-local* batch size, which is off\n by a factor equal to the number of replicas being used to compute a single\n step. 
As a result, using `tf.math.reduce_mean` will give the wrong answer,\n resulting in gradients that can be many times too big.\n\n ### Variable Constraint\n\n All Keras optimizers respect variable constraints. If constraint function is\n passed to any variable, the constraint will be applied to the variable after\n the gradient has been applied to the variable.\n Important: If gradient is sparse tensor, variable constraint is not supported.\n\n ### Thread Compatibility\n\n The entire optimizer is currently thread compatible, not thread-safe. The user\n needs to perform synchronization if necessary.\n\n ### Slots\n\n Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage\n additional variables associated with the variables to train. These are called\n <i>Slots</i>. Slots have names and you can ask the optimizer for the names of\n the slots that it uses. Once you have a slot name you can ask the optimizer\n for the variable it created to hold the slot value.\n\n This can be useful if you want to log debug a training algorithm, report stats\n about the slots, etc.\n\n ### Hyper parameters\n\n These are arguments passed to the optimizer subclass constructor\n (the `__init__` method), and then passed to `self._set_hyper()`.\n They can be either regular Python values (like 1.0), tensors, or\n callables. If they are callable, the callable will be called during\n `apply_gradients()` to get the value for the hyper parameter.\n\n Hyper parameters can be overwritten through user code:\n\n Example:\n\n ```python\n # Create an optimizer with the desired parameters.\n opt = tf.keras.optimizers.SGD(learning_rate=0.1)\n # `loss` is a callable that takes no argument and returns the value\n # to minimize.\n loss = lambda: 3 * var1 + 2 * var2\n # In eager mode, simply call minimize to update the list of variables.\n opt.minimize(loss, var_list=[var1, var2])\n # update learning rate\n opt.learning_rate = 0.05\n opt.minimize(loss, var_list=[var1, var2])\n ```\n\n ### Write a customized optimizer.\n If you intend to create your own optimization algorithm, simply inherit from\n this class and override the following methods:\n\n - resource_apply_dense (update variable given gradient tensor is dense)\n - resource_apply_sparse (update variable given gradient tensor is sparse)\n - create_slots (if your optimizer algorithm requires additional variables)\n - get_config (serialization of the optimizer, include all hyper parameters)\n \"\"\"\n\n def __init__(self, name, **kwargs):\n \"\"\"Create a new Optimizer.\n\n This must be called by the constructors of subclasses.\n Note that Optimizer instances should not bind to a single graph,\n and so shouldn't keep Tensors as member variables. Generally\n you should be able to use the _set_hyper()/state.get_hyper()\n facility instead.\n\n This class in stateful and thread-compatible.\n\n Args:\n name: A non-empty string. The name to use for accumulators created\n for the optimizer.\n **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,\n `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip\n gradients by value, `decay` is included for backward compatibility to\n allow time inverse decay of learning rate. 
`lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n\n Raises:\n ValueError: If name is malformed.\n RuntimeError: If _create_slots has been overridden instead of\n _create_vars.\n \"\"\"\n allowed_kwargs = {\"clipnorm\", \"clipvalue\", \"lr\", \"decay\"}\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError(\"Unexpected keyword argument \"\n \"passed to optimizer: \" + str(k))\n # checks that all keyword arguments are non-negative.\n if kwargs[k] < 0:\n raise ValueError(\"Expected {} >= 0, received: {}\".format(k, kwargs[k]))\n\n self._use_locking = True\n self._init_set_name(name)\n # in graph mode, name_scope performs uniquification, so keep scope_context.\n with backend.name_scope(self._name) as name_scope:\n self._scope_ctx = name_scope\n self._hyper = {}\n # dict: {variable name : {slot name : variable}}\n self._slots = {}\n self._slot_names = []\n self._weights = []\n self._iterations = None\n\n # For implementing Trackable. Stores information about how to restore\n # slot variables which have not yet been created\n # (trackable._CheckpointPosition objects).\n # {slot_name :\n # {_var_key(variable_to_train): [checkpoint_position, ... ], ... },\n # ... }\n self._deferred_slot_restorations = {}\n\n decay = kwargs.pop(\"decay\", 0.0)\n if decay < 0.:\n raise ValueError(\"decay cannot be less than 0: {}\".format(decay))\n self._initial_decay = decay\n if \"clipnorm\" in kwargs:\n self.clipnorm = kwargs.pop(\"clipnorm\")\n if \"clipvalue\" in kwargs:\n self.clipvalue = kwargs.pop(\"clipvalue\")\n\n self._hypers_created = False\n\n def minimize(self, loss, var_list, grad_loss=None, name=None):\n \"\"\"Minimize `loss` by updating `var_list`.\n\n This method simply computes gradient using `tf.GradientTape` and calls\n `apply_gradients()`. If you want to process the gradient before applying\n then call `tf.GradientTape` and `apply_gradients()` explicitly instead\n of using this function.\n\n Args:\n loss: A callable taking no arguments which returns the value to minimize.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` since the variables are created at the first time `loss` is\n called.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n name: Optional name for the returned operation.\n\n Returns:\n An Operation that updates the variables in `var_list`. If `global_step`\n was not `None`, that operation also increments `global_step`.\n\n Raises:\n ValueError: If some of the variables are not `Variable` objects.\n\n \"\"\"\n grads_and_vars = self._compute_gradients(\n loss, var_list=var_list, grad_loss=grad_loss)\n\n return self.apply_gradients(grads_and_vars, name=name)\n\n def _compute_gradients(self, loss, var_list, grad_loss=None):\n \"\"\"Compute gradients of `loss` for the variables in `var_list`.\n\n This is the first part of `minimize()`. It returns a list\n of (gradient, variable) pairs where \"gradient\" is the gradient\n for \"variable\". 
Note that \"gradient\" can be a `Tensor`, an\n `IndexedSlices`, or `None` if there is no gradient for the\n given variable.\n\n Args:\n loss: A callable taking no arguments which returns the value to minimize.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` and the variables are created at the first time when `loss`\n is called.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n\n Returns:\n A list of (gradient, variable) pairs. Variable is always present, but\n gradient can be `None`.\n\n Raises:\n TypeError: If `var_list` contains anything else than `Variable` objects.\n ValueError: If some arguments are invalid, or var_list is None.\n \"\"\"\n # TODO(josh11b): Test that we handle weight decay in a reasonable way.\n with backprop.GradientTape() as tape:\n if not callable(var_list):\n tape.watch(var_list)\n loss_value = loss()\n if callable(var_list):\n var_list = var_list()\n var_list = nest.flatten(var_list)\n with backend.name_scope(self._scope_ctx):\n grads = tape.gradient(loss_value, var_list, grad_loss)\n\n if hasattr(self, \"clipnorm\"):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, \"clipvalue\"):\n grads = [\n clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)\n for g in grads\n ]\n\n grads_and_vars = list(zip(grads, var_list))\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n\n return grads_and_vars\n\n def get_gradients(self, loss, params):\n \"\"\"Returns gradients of `loss` with respect to `params`.\n\n Arguments:\n loss: Loss tensor.\n params: List of variables.\n\n Returns:\n List of gradient tensors.\n\n Raises:\n ValueError: In case any gradient cannot be computed (e.g. if gradient\n function not implemented).\n \"\"\"\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._scope_ctx):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError(\"Variable {} has `None` for gradient. \"\n \"Please make sure that all of your ops have a \"\n \"gradient defined (i.e. are differentiable). \"\n \"Common ops without gradient: \"\n \"K.argmax, K.round, K.eval.\".format(param))\n if hasattr(self, \"clipnorm\"):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, \"clipvalue\"):\n grads = [\n clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)\n for g in grads\n ]\n return grads\n\n def apply_gradients(self, grads_and_vars, name=None):\n \"\"\"Apply gradients to variables.\n\n This is the second part of `minimize()`. It returns an `Operation` that\n applies gradients.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n name: Optional name for the returned operation. Default to the name\n passed to the `Optimizer` constructor.\n\n Returns:\n An `Operation` that applies the specified gradients. 
If `global_step`\n was not None, that operation also increments `global_step`.\n\n Raises:\n TypeError: If `grads_and_vars` is malformed.\n ValueError: If none of the variables have gradients.\n \"\"\"\n grads_and_vars = _filter_grads(grads_and_vars)\n var_list = [v for (_, v) in grads_and_vars]\n\n with backend.name_scope(self._scope_ctx):\n # Create iteration if necessary.\n with ops.init_scope():\n _ = self.iterations\n self._create_hypers()\n self._create_slots(var_list)\n\n self._prepare(var_list)\n\n return distribute_ctx.get_replica_context().merge_call(\n self._distributed_apply,\n args=(grads_and_vars,),\n kwargs={\"name\": name})\n\n def _distributed_apply(self, distribution, grads_and_vars, name):\n \"\"\"`apply_gradients` using a `DistributionStrategy`.\"\"\"\n reduced_grads = distribution.extended.batch_reduce_to(\n ds_reduce_util.ReduceOp.SUM, grads_and_vars)\n var_list = [v for _, v in grads_and_vars]\n grads_and_vars = zip(reduced_grads, var_list)\n\n def apply_grad_to_update_var(var, grad):\n \"\"\"Apply gradient to variable.\"\"\"\n if isinstance(var, ops.Tensor):\n raise NotImplementedError(\"Trying to update a Tensor \", var)\n if isinstance(grad, ops.IndexedSlices):\n if var.constraint is not None:\n raise RuntimeError(\n \"Cannot use a constraint function on a sparse variable.\")\n return self._resource_apply_sparse_duplicate_indices(\n grad.values, var, grad.indices)\n update_op = self._resource_apply_dense(grad, var)\n if var.constraint is not None:\n with ops.control_dependencies([update_op]):\n return var.assign(var.constraint(var))\n else:\n return update_op\n\n update_ops = []\n with backend.name_scope(name or self._name):\n for grad, var in grads_and_vars:\n scope_name = (\"\" if ops.executing_eagerly_outside_functions() else\n \"_\" + var.op.name)\n with backend.name_scope(\"update\" + scope_name):\n update_ops.extend(\n distribution.extended.update(\n var, apply_grad_to_update_var, args=(grad,), group=False))\n\n any_symbolic = any(isinstance(i, ops.Operation) or\n tf_utils.is_symbolic_tensor(i) for i in update_ops)\n if not context.executing_eagerly() or any_symbolic:\n # If the current context is graph mode or any of the update ops are\n # symbolic then the step update should be carried out under a graph\n # context. (eager updates execute immediately)\n with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access\n with ops.control_dependencies(update_ops):\n return self._iterations.assign_add(1).op\n\n return self._iterations.assign_add(1)\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n grads_and_vars = list(zip(grads, params))\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n return [self.apply_gradients(grads_and_vars)]\n\n def _set_hyper(self, name, value):\n \"\"\"set hyper `name` to value. 
value can be callable, tensor, numeric.\"\"\"\n if isinstance(value, trackable.Trackable):\n self._track_trackable(value, name, overwrite=True)\n if name not in self._hyper:\n self._hyper[name] = value\n else:\n prev_value = self._hyper[name]\n if (callable(prev_value)\n or isinstance(prev_value,\n (ops.Tensor, int, float,\n learning_rate_schedule.LearningRateSchedule))\n or isinstance(value, learning_rate_schedule.LearningRateSchedule)):\n self._hyper[name] = value\n else:\n backend.set_value(self._hyper[name], value)\n\n def _get_hyper(self, name, dtype=None):\n if not self._hypers_created:\n self._create_hypers()\n value = self._hyper[name]\n if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n return value\n if callable(value):\n value = value()\n if dtype:\n return math_ops.cast(value, dtype)\n else:\n return value\n\n def __getattribute__(self, name):\n \"\"\"Overridden to support hyperparameter access.\"\"\"\n try:\n return super(OptimizerV2, self).__getattribute__(name)\n except AttributeError as e:\n # Needed to avoid infinite recursion with __setattr__.\n if name == \"_hyper\":\n raise e\n # Backwards compatibility with Keras optimizers.\n if name == \"lr\":\n name = \"learning_rate\"\n if name in self._hyper:\n return self._get_hyper(name)\n raise e\n\n def __setattr__(self, name, value):\n \"\"\"Override setattr to support dynamic hyperparameter setting.\"\"\"\n # Backwards compatibility with Keras optimizers.\n if name == \"lr\":\n name = \"learning_rate\"\n if hasattr(self, \"_hyper\") and name in self._hyper:\n self._set_hyper(name, value)\n else:\n super(OptimizerV2, self).__setattr__(name, value)\n\n def get_slot_names(self):\n \"\"\"A list of names for this optimizer's slots.\"\"\"\n return self._slot_names\n\n def add_slot(self, var, slot_name, initializer=\"zeros\"):\n \"\"\"Add a new slot variable for `var`.\"\"\"\n if slot_name not in self._slot_names:\n self._slot_names.append(slot_name)\n var_key = _var_key(var)\n slot_dict = self._slots.setdefault(var_key, {})\n weight = slot_dict.get(slot_name, None)\n if weight is None:\n if isinstance(initializer, six.string_types) or callable(initializer):\n initializer = initializers.get(initializer)\n initial_value = functools.partial(\n initializer, shape=var.shape, dtype=var.dtype)\n else:\n initial_value = initializer\n strategy = distribute_ctx.get_strategy()\n with strategy.extended.colocate_vars_with(var):\n weight = tf_variables.Variable(\n name=\"%s/%s\" % (var._shared_name, slot_name), # pylint: disable=protected-access\n dtype=var.dtype,\n trainable=False,\n initial_value=initial_value)\n backend.track_variable(weight)\n slot_dict[slot_name] = weight\n self._restore_slot_variable(\n slot_name=slot_name, variable=var,\n slot_variable=weight)\n self._weights.append(weight)\n return weight\n\n def get_slot(self, var, slot_name):\n var_key = _var_key(var)\n slot_dict = self._slots[var_key]\n return slot_dict[slot_name]\n\n def _prepare(self, var_list):\n pass\n\n def _create_hypers(self):\n if self._hypers_created:\n return\n # Iterate hyper values deterministically.\n for name, value in sorted(self._hyper.items()):\n if isinstance(value, ops.Tensor) or callable(value):\n continue\n else:\n self._hyper[name] = self.add_weight(\n name,\n shape=[],\n trainable=False,\n initializer=value,\n aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n self._hypers_created = True\n\n @property\n def iterations(self):\n \"\"\"Variable. 
The number of training steps this Optimizer has run.\"\"\"\n if self._iterations is None:\n self._iterations = self.add_weight(\n \"iter\",\n shape=[],\n dtype=dtypes.int64,\n trainable=False,\n aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n self._weights.append(self._iterations)\n return self._iterations\n\n @iterations.setter\n def iterations(self, variable):\n if self._iterations is not None:\n raise RuntimeError(\"Cannot set `iterations` to a new Variable after \"\n \"the Optimizer weights have been created\")\n self._iterations = variable\n self._weights.append(self._iterations)\n\n def _decayed_lr(self, var_dtype):\n \"\"\"Get decayed learning rate as a Tensor with dtype=var_dtype.\"\"\"\n lr_t = self._get_hyper(\"learning_rate\", var_dtype)\n if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):\n local_step = math_ops.cast(self.iterations, var_dtype)\n lr_t = math_ops.cast(lr_t(local_step), var_dtype)\n if self._initial_decay > 0.:\n local_step = math_ops.cast(self.iterations, var_dtype)\n decay_t = self._get_hyper(\"decay\", var_dtype)\n lr_t = lr_t / (1. + decay_t * local_step)\n return lr_t\n\n @abc.abstractmethod\n def get_config(self):\n \"\"\"Returns the config of the optimimizer.\n\n An optimizer config is a Python dictionary (serializable)\n containing the configuration of an optimizer.\n The same optimizer can be reinstantiated later\n (without any saved state) from this configuration.\n\n Returns:\n Python dictionary.\n \"\"\"\n config = {\"name\": self._name}\n if hasattr(self, \"clipnorm\"):\n config[\"clipnorm\"] = self.clipnorm\n if hasattr(self, \"clipvalue\"):\n config[\"clipvalue\"] = self.clipvalue\n return config\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n \"\"\"Creates an optimizer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same optimizer from the config\n dictionary.\n\n Arguments:\n config: A Python dictionary, typically the output of get_config.\n custom_objects: A Python dictionary mapping names to additional Python\n objects used to create this optimizer, such as a function used for a\n hyperparameter.\n\n Returns:\n An optimizer instance.\n \"\"\"\n if \"lr\" in config:\n config[\"learning_rate\"] = config.pop(\"lr\")\n if \"learning_rate\" in config:\n if isinstance(config[\"learning_rate\"], dict):\n config[\"learning_rate\"] = learning_rate_schedule.deserialize(\n config[\"learning_rate\"], custom_objects=custom_objects)\n return cls(**config)\n\n def _serialize_hyperparameter(self, hyperparameter_name):\n \"\"\"Serialize a hyperparameter that can be a float, callable, or Tensor.\"\"\"\n value = self._hyper[hyperparameter_name]\n if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n return learning_rate_schedule.serialize(value)\n if callable(value):\n return value()\n if tensor_util.is_tensor(value):\n return backend.get_value(value)\n return value\n\n def variables(self):\n \"\"\"Returns variables of this Optimizer based on the order created.\"\"\"\n return self._weights\n\n @property\n def weights(self):\n \"\"\"Returns variables of this Optimizer based on the order created.\"\"\"\n return self._weights\n\n def get_weights(self):\n params = self.weights\n return backend.batch_get_value(params)\n\n # TODO(tanzheny): Maybe share this logic with base_layer.\n def set_weights(self, weights):\n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n \"You called `set_weights(weights)` on optimizer \" + 
self._name +\n \" with a weight list of length \" + str(len(weights)) +\n \", but the optimizer was expecting \" + str(len(params)) +\n \" weights. Provided weights: \" + str(weights)[:50] + \"...\")\n if not params:\n return\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\"Optimizer weight shape \" + str(pv.shape) +\n \" not compatible with \"\n \"provided weight shape \" + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n\n def add_weight(self,\n name,\n shape,\n dtype=None,\n initializer=\"zeros\",\n trainable=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE):\n\n if dtype is None:\n dtype = dtypes.float32\n if isinstance(initializer, six.string_types) or callable(initializer):\n initializer = initializers.get(initializer)\n\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError(\n \"Synchronization value can be set to \"\n \"VariableSynchronization.ON_READ only for non-trainable variables. \"\n \"You have specified trainable=True and \"\n \"synchronization=VariableSynchronization.ON_READ.\")\n else:\n # Set trainable to be false when variable is to be synced on read.\n trainable = False\n elif trainable is None:\n trainable = True\n\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n getter=base_layer_utils.make_variable,\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n trainable=trainable,\n use_resource=True,\n synchronization=synchronization,\n aggregation=aggregation)\n backend.track_variable(variable)\n\n return variable\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = backend.unique_object_name(\n generic_utils.to_snake_case(self.__class__.__name__),\n zero_based=zero_based)\n else:\n self._name = name\n\n def _assert_valid_dtypes(self, tensors):\n \"\"\"Asserts tensors are all valid types (see `_valid_dtypes`).\n\n Args:\n tensors: Tensors to check.\n\n Raises:\n ValueError: If any tensor is not a valid type.\n \"\"\"\n valid_dtypes = self._valid_dtypes()\n for t in tensors:\n dtype = t.dtype.base_dtype\n if dtype not in valid_dtypes:\n raise ValueError(\"Invalid type %r for %s, expected: %s.\" %\n (dtype, t.name, [v for v in valid_dtypes]))\n\n def _valid_dtypes(self):\n \"\"\"Valid types for loss, variables and gradients.\n\n Subclasses should override to allow other float types.\n\n Returns:\n Valid types for loss, variables and gradients.\n \"\"\"\n return set(\n [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])\n\n def _call_if_callable(self, param):\n \"\"\"Call the function if param is callable.\"\"\"\n return param() if callable(param) else param\n\n def _resource_apply_dense(self, grad, handle):\n \"\"\"Add ops to apply dense gradients to the variable `handle`.\n\n Args:\n grad: a `Tensor` representing the gradient.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):\n \"\"\"Add ops to apply sparse gradients to `handle`, with repeated indices.\n\n Optimizers which override this method must deal with repeated indices. 
See\n the docstring of `_apply_sparse_duplicate_indices` for details. By default\n the correct behavior, to sum non-unique indices and their associated\n gradients, is enforced by first pre-processing `grad` and `indices` and\n passing them on to `_resource_apply_sparse`. Optimizers which deal correctly\n with duplicate indices may instead override this method to avoid the\n overhead of summing.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices may be repeated.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n summed_grad, unique_indices = _deduplicate_indexed_slices(\n values=grad, indices=indices)\n return self._resource_apply_sparse(summed_grad, handle, unique_indices)\n\n def _resource_apply_sparse(self, grad, handle, indices):\n \"\"\"Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has been\n de-duplicated. Optimizers which deal correctly with non-unique indices may\n instead override `_resource_apply_sparse_duplicate_indices` to avoid this\n overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices are unique.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_scatter_add(self, x, i, v):\n with ops.control_dependencies(\n [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):\n return x.value()\n\n def _resource_scatter_update(self, x, i, v):\n with ops.control_dependencies(\n [resource_variable_ops.resource_scatter_update(x.handle, i, v)]):\n return x.value()\n\n # ---------------\n # For implementing the trackable interface\n # ---------------\n\n def _restore_slot_variable(self, slot_name, variable, slot_variable):\n \"\"\"Restore a newly created slot variable's value.\"\"\"\n variable_key = _var_key(variable)\n deferred_restorations = self._deferred_slot_restorations.get(\n slot_name, {}).pop(variable_key, [])\n # Iterate over restores, highest restore UID first to minimize the number\n # of assignments.\n deferred_restorations.sort(key=lambda position: position.restore_uid,\n reverse=True)\n for checkpoint_position in deferred_restorations:\n checkpoint_position.restore(slot_variable)\n\n def _create_or_restore_slot_variable(\n self, slot_variable_position, slot_name, variable):\n \"\"\"Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored. When executing eagerly, we create the slot variable with a\n restoring initializer.\n\n No new variables are created when graph building. Instead,\n _restore_slot_variable catches these after normal creation and adds restore\n ops to the graph. 
This method is nonetheless important when graph building\n for the case when a slot variable has already been created but `variable`\n has just been added to a dependency graph (causing us to realize that the\n slot variable needs to be restored).\n\n Args:\n slot_variable_position: A `trackable._CheckpointPosition` object\n indicating the slot variable `Trackable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n \"\"\"\n variable_key = _var_key(variable)\n slot_dict = self._slots.get(variable_key, {})\n slot_variable = slot_dict.get(slot_name, None)\n if (slot_variable is None and context.executing_eagerly() and\n slot_variable_position.is_simple_variable()\n # Defer slot variable creation if there is an active variable creator\n # scope. Generally we'd like to eagerly create/restore slot variables\n # when possible, but this may mean that scopes intended to catch\n # `variable` also catch its eagerly created slot variable\n # unintentionally (specifically make_template would add a dependency on\n # a slot variable if not for this case). Deferring is mostly harmless\n # (aside from double initialization), and makes variable creator scopes\n # behave the same way they do when graph building.\n and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access\n initializer = trackable.CheckpointInitialValue(\n checkpoint_position=slot_variable_position)\n slot_variable = self.add_slot(\n var=variable,\n initializer=initializer,\n slot_name=slot_name)\n # Slot variables are not owned by any one object (because we don't want to\n # save the slot variable if the optimizer is saved without the non-slot\n # variable, or if the non-slot variable is saved without the optimizer;\n # it's a dependency hypergraph with edges of the form (optimizer, non-slot\n # variable, variable)). So we don't _track_ slot variables anywhere, and\n # instead special-case this dependency and otherwise pretend it's a normal\n # graph.\n if slot_variable is not None:\n # If we've either made this slot variable, or if we've pulled out an\n # existing slot variable, we should restore it.\n slot_variable_position.restore(slot_variable)\n else:\n # We didn't make the slot variable. Defer restoring until it gets created\n # normally. 
We keep a list rather than the one with the highest restore\n # UID in case slot variables have their own dependencies, in which case\n # those could differ between restores.\n self._deferred_slot_restorations.setdefault(\n slot_name, {}).setdefault(variable_key, []).append(\n slot_variable_position)\n\n\ndef _filter_grads(grads_and_vars):\n \"\"\"Filter out iterable with grad equal to None.\"\"\"\n grads_and_vars = tuple(grads_and_vars)\n if not grads_and_vars:\n return grads_and_vars\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n if not filtered:\n raise ValueError(\"No gradients provided for any variable: %s.\" %\n ([v.name for _, v in grads_and_vars],))\n if vars_with_empty_grads:\n logging.warning(\n (\"Gradients does not exist for variables %s when minimizing the loss.\"),\n ([v.name for v in vars_with_empty_grads]))\n return filtered\n\n\ndef _var_key(var):\n \"\"\"Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Args:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n \"\"\"\n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if hasattr(var, \"_distributed_container\"):\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id\n\n\ndef _get_slot_key_from_var(var, slot_name):\n \"\"\"Get the slot key for the variable: var_name/slot_name.\"\"\"\n\n name = _var_key(var)\n return name + \"/\" + slot_name\n\n\nclass RestoredOptimizer(OptimizerV2):\n \"\"\"A non-functional Optimizer implementation for checkpoint compatibility.\n\n Holds slot variables and hyperparameters when an optimizer is restored from a\n SavedModel. These variables may be referenced in functions along with ops\n created by the original optimizer, but currently we do not support using the\n optimizer object iself (e.g. through `apply_gradients`).\n \"\"\"\n # TODO(allenl): Make the restored optimizer functional by tracing its apply\n # methods.\n\n def __init__(self):\n super(RestoredOptimizer, self).__init__(\"RestoredOptimizer\")\n self._hypers_created = True\n\n def get_config(self):\n # TODO(allenl): Save and restore the Optimizer's config\n raise NotImplementedError(\n \"Restoring functional Optimzers from SavedModels is not currently \"\n \"supported. Please file a feature request if this limitation bothers \"\n \"you.\")\n\nrevived_types.register_revived_type(\n \"optimizer\",\n lambda obj: isinstance(obj, OptimizerV2),\n versions=[revived_types.VersionedTypeRegistration(\n object_factory=lambda proto: RestoredOptimizer(),\n version=1,\n min_producer_version=1,\n min_consumer_version=1,\n setter=RestoredOptimizer._set_hyper # pylint: disable=protected-access\n )])\n"
] |
[
[
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.keras.optimizer_v2.learning_rate_schedule.deserialize",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.backend.track_variable",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.training.tracking.base.CheckpointInitialValue",
"tensorflow.python.ops.array_ops.unique",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.keras.optimizer_v2.learning_rate_schedule.serialize",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.distribute.distribution_strategy_context.get_strategy",
"tensorflow.python.ops.clip_ops.clip_by_norm",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_add",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.utils.generic_utils.to_snake_case",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.framework.ops._get_graph_from_inputs",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.keras.backend.set_value",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.resource_variable_ops.resource_scatter_update"
]
] |
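Illustrative sketch, not part of the dataset record above: the row's `optimizer_v2` source includes `_filter_grads`, which drops `(None, var)` pairs before `apply_gradients`. The minimal snippet below mimics that pattern by hand; the toy model, variables, and choice of `tf.keras.optimizers.SGD` are assumptions for the demo, not taken from the record.

```python
# Sketch only: reproduce the None-gradient filtering that _filter_grads performs
# before apply_gradients. The model and optimizer here are illustrative choices.
import tensorflow as tf

x = tf.constant([[1.0, 2.0]])
w_used = tf.Variable([[0.5], [0.5]])    # participates in the loss
w_unused = tf.Variable([[1.0], [1.0]])  # receives a None gradient

with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.matmul(x, w_used))

grads = tape.gradient(loss, [w_used, w_unused])  # second entry is None
grads_and_vars = [(g, v) for g, v in zip(grads, [w_used, w_unused]) if g is not None]

opt = tf.keras.optimizers.SGD(learning_rate=0.1)
opt.apply_gradients(grads_and_vars)  # only w_used is updated
```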
vivekkatial/HAQC
|
[
"4ac97c779d28722bcebeedf9e59aeeda788dbb41"
] |
[
"qaoa_vrp/generators/random_instances.py"
] |
[
"import networkx as nx\nimport numpy as np\nimport uuid\n\nfrom qaoa_vrp.utils import distance, get_direction\n\n\ndef generate_random_instance(\n num_nodes: int,\n num_vehicles: int,\n instance_type: str,\n num_outliers: int = 1,\n gamma: int = 2,\n quasi: bool = False,\n noise: float = 0.1,\n) -> nx.classes.graph.Graph:\n \"\"\"This function creates random graphs with each node having dimension of 2\n\n Args:\n num_nodes (int): [description]\n num_vehicles (int): [description]\n instance_type (str): Must be one of \"watts_strogatz\",\n \"erdos_renyi\",\n \"complete\",\n \"newman_watts_strogatz\",\n \"euclidean_tsp\",\n \"euclidean_tsp_outlier\",\n \"asymmetric_tsp\",\n \"quasi_asymmetric_tsp\"\n num_outliers (int, optional): [description]. Defaults to 1.\n gamma (int, optional): [description]. Defaults to 2.\n quasi (bool, optional): [description]. Defaults to False.\n noise (float, optional): [description]. Defaults to 0.1.\n\n Raises:\n ValueError: [description]\n\n Returns:\n nx.classes.graph.Graph: [description]\n \"\"\"\n\n verify = False\n\n while verify == False:\n\n assert num_nodes >= 3, \"num_nodes cannot be {}, must be >= 3\".format(num_nodes)\n if instance_type not in [\n \"watts_strogatz\",\n \"erdos_renyi\",\n \"complete\",\n \"newman_watts_strogatz\",\n \"euclidean_tsp\",\n \"euclidean_tsp_outlier\",\n \"asymmetric_tsp\",\n \"quasi_asymmetric_tsp\",\n ]:\n raise ValueError(\"Incorrect Instance Type Requested\")\n\n if instance_type == \"watts_strogatz\":\n G = generate_watts_strogatz_graph(num_nodes, num_vehicles)\n elif instance_type == \"erdos_renyi\":\n G = generate_erdos_renyi(num_nodes, num_vehicles)\n elif instance_type == \"complete\":\n G = complete_graph(num_nodes)\n elif instance_type == \"newman_watts_strogatz\":\n G = generate_newman_watts_strogatz_graph(num_nodes)\n elif instance_type == \"euclidean_tsp\":\n G = generate_euclidean_graph(num_nodes)\n elif instance_type == \"euclidean_tsp_outlier\":\n G = generate_euclidean_graph_with_outliers(\n num_nodes=num_nodes, num_outliers=num_outliers, gamma=gamma\n )\n elif instance_type == \"asymmetric_tsp\":\n G = generate_asymmetric_euclidean_graph(num_nodes, quasi, noise)\n elif instance_type == \"quasi_asymmetric_tsp\":\n G = generate_asymmetric_euclidean_graph(num_nodes, quasi=True, noise=noise)\n\n for (u, v) in G.edges():\n if \"tsp\" not in instance_type:\n G.edges[u, v][\"cost\"] = round(np.random.random(), 2)\n G.edges[u, v][\"id\"] = uuid.uuid4().hex\n\n # Randomly select depot\n depot_node_id = 0\n for node in G.nodes:\n if node == depot_node_id:\n G.nodes[depot_node_id][\"tag\"] = \"Depot\"\n elif G.nodes[node].get(\"tag\") is None:\n G.nodes[node][\"tag\"] = \"\"\n\n verify = verify_graph(G, num_vehicles)\n\n return G\n\n\ndef verify_graph(G, num_vehicles):\n \"\"\"\n This function verifies that the graph generated is appropriate for the LMRO problem\n\n Args:\n G (object): the graph as a networkx graph object\n\n Returns:\n is_feasible (bool): whether the graph is feasible for the project\n \"\"\"\n\n is_feasible = False\n depot_check = False\n nodes_check = False\n\n for node in G.nodes:\n if G.nodes[node][\"tag\"] == \"Depot\":\n if len(G.edges(node)) >= 2 * num_vehicles:\n depot_check = True\n if len(G.edges(node)) >= 2:\n nodes_check = True\n if depot_check == True and nodes_check == True:\n is_feasible = True\n\n # Check for Depot\n depot_exists = False\n for node in G.nodes:\n tag = G.nodes[node][\"tag\"]\n if tag == \"Depot\":\n depot_exists = True\n\n if depot_exists == False:\n is_feasible = 
False\n\n return is_feasible\n\n\ndef generate_watts_strogatz_graph(num_nodes, num_vehicles, k=4, p=0.5):\n \"\"\"Build Watts Strogatz Graph\"\"\"\n G = nx.connected_watts_strogatz_graph(num_nodes, k, p, num_vehicles)\n return G\n\n\ndef generate_erdos_renyi(num_nodes, num_vehicles, p=0.5):\n \"\"\"Build Erdors-Renyi Graph\"\"\"\n G = nx.erdos_renyi_graph(num_nodes, p, num_vehicles)\n return G\n\n\ndef generate_newman_watts_strogatz_graph(num_nodes, k=4, p=0.5):\n \"\"\"Build Newman Wattz Strogatz Graph\"\"\"\n G = nx.newman_watts_strogatz_graph(num_nodes, k, p)\n return G\n\n\ndef complete_graph(num_nodes):\n \"\"\"Build Complete Graph\"\"\"\n G = nx.complete_graph(num_nodes)\n return G\n\n\ndef generate_euclidean_graph(num_nodes: int) -> nx.classes.graph.Graph:\n \"\"\"A function to generate a euclidean graph 'G' based on:\n 2. Initialise an empty graph\n 3. Randomly generate positions on a 2D plane and allocate these points as nodes\n 4. Create a complete graph by connecting all edges together and\n make the cost the euclidean distance between the two points\n\n Args:\n num_nodes (int): Number of nodes\n \"\"\"\n\n # Init range for vertices\n V = range(num_nodes)\n\n # Initialise empty graph\n G = nx.Graph()\n\n # Build nodes\n nodes = [(i, {'pos': tuple(np.random.random(2))}) for i in V]\n G.add_nodes_from(nodes)\n\n # Get positions\n pos = nx.get_node_attributes(G, 'pos')\n\n # Add edges to the graph\n for i in V:\n for j in V:\n if i != j:\n G.add_edge(i, j, cost=distance(pos[i], pos[j]))\n\n return G\n\n\ndef generate_euclidean_graph_with_outliers(\n num_nodes: int, num_outliers: int, gamma: float\n) -> nx.classes.graph.Graph:\n \"\"\"A function to generate a euclidean graph with outlier structure\n\n Args:\n num_nodes (int): Number of nodes\n num_outliers (int): Number of outliers (must be less than number of nodes)\n gamma (float): A parameter to decide how far away the nodes are from each other based on $\\sqrt{2}$\n\n Raises:\n ValueError: Number of nodes must be greater than number of outliers\n\n Returns:\n nx.classes.graph.Graph: A network.X graph object\n \"\"\"\n G = generate_euclidean_graph(num_nodes)\n\n # Randomly select k nodes from the network (check k < N)\n if num_outliers > G.number_of_nodes():\n raise ValueError(\n \"k=%s cannot be higher than the number of nodes N=%s\"\n % (num_nodes, G.number_of_nodes())\n )\n else:\n # Ensure we get k distinct nodes being selected\n random_nodes = np.random.choice(\n range(G.number_of_nodes()), num_outliers, replace=False\n )\n\n # Update the node locations\n for node in random_nodes:\n # Move node\n x_move_direction = get_direction()\n y_move_direction = get_direction()\n x_new = G.nodes()[node]['pos'][0] + x_move_direction * gamma * np.sqrt(2)\n y_new = G.nodes()[node]['pos'][1] + y_move_direction * gamma * np.sqrt(2)\n G.nodes()[node]['pos'] = (x_new, y_new)\n G.nodes()[node]['tag'] = \"outlier\"\n\n # Get new position data\n pos = nx.get_node_attributes(G, 'pos')\n\n V = range(num_nodes)\n # Recalculate edge distances\n for i in V:\n for j in V:\n if i != j:\n G.add_edge(i, j, cost=distance(pos[i], pos[j]))\n\n return G\n\n\ndef generate_asymmetric_euclidean_graph(\n num_nodes: int, quasi: bool = False, noise: float = 0.1\n) -> nx.classes.graph.Graph:\n \"\"\"A function to generate asymetric euclidean graph\n\n Args:\n num_nodes (int): Number of nodes\n quasi (bool, optional): If graph is a quasi graph. Defaults to False.\n noise (float, optional): Noise parameter. 
Defaults to 0.1.\n\n Returns:\n nx.classes.graph.Graph: networkX Graph\n \"\"\"\n\n # Generate random euclidean graph\n G = generate_euclidean_graph(num_nodes)\n adj = nx.adjacency_matrix(G, weight='cost')\n\n # Randomly generate an adjacency matrix with random costs for each edge\n rand = np.random.rand(len(G), len(G))\n np.fill_diagonal(rand, 0)\n\n # An asymmetric graph adjacency can be represented by:\n # A_{\\text{asym}} = A - L(A) + A_{\\text{rand}}\n if quasi:\n asymmetric_adj = adj + rand * noise\n else:\n asymmetric_adj = adj.toarray() - np.tril(adj.toarray()) + rand\n\n dt = [(\"cost\", float)]\n asymmetric_adj = np.array(asymmetric_adj, dtype=dt)\n\n # Convert this adjacency matrix into a graph\n G_asym = nx.from_numpy_array(asymmetric_adj, create_using=nx.DiGraph)\n\n # Update information regarding tags and position into the new graph\n pos = nx.get_node_attributes(G, \"pos\")\n for node in G_asym.nodes():\n G_asym.nodes()[node][\"pos\"] = pos[node]\n\n return G_asym\n"
] |
[
[
"numpy.sqrt",
"numpy.array",
"numpy.random.random",
"numpy.fill_diagonal"
]
] |
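Illustrative sketch, not part of the dataset record above: the row's `generate_euclidean_graph` builds a complete graph on random 2-D points with Euclidean edge costs. The snippet below mirrors that construction; `np.linalg.norm` is a stand-in for the repo's own `qaoa_vrp.utils.distance` helper, which is not included in the record.

```python
# Minimal sketch: complete graph on random 2-D points with Euclidean edge costs,
# mirroring generate_euclidean_graph. np.linalg.norm substitutes for distance().
import networkx as nx
import numpy as np

num_nodes = 5
G = nx.Graph()
G.add_nodes_from((i, {"pos": tuple(np.random.random(2))}) for i in range(num_nodes))

pos = nx.get_node_attributes(G, "pos")
for i in range(num_nodes):
    for j in range(num_nodes):
        if i != j:
            cost = float(np.linalg.norm(np.array(pos[i]) - np.array(pos[j])))
            G.add_edge(i, j, cost=cost)

print(G.number_of_edges())  # complete undirected graph: n*(n-1)/2 = 10 edges
```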
rohanverma94/spark
|
[
"8ef0159550c143e07fa79b120b2d1fdf9d535fdc"
] |
[
"python/pyspark/ml/tests/test_algorithms.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom shutil import rmtree\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nfrom pyspark.ml.classification import (\n FMClassifier,\n LogisticRegression,\n MultilayerPerceptronClassifier,\n OneVsRest,\n)\nfrom pyspark.ml.clustering import DistributedLDAModel, KMeans, LocalLDAModel, LDA, LDAModel\nfrom pyspark.ml.fpm import FPGrowth\nfrom pyspark.ml.linalg import Matrices, Vectors, DenseVector\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression\nfrom pyspark.sql import Row\nfrom pyspark.testing.mlutils import SparkSessionTestCase\n\n\nclass LogisticRegressionTest(SparkSessionTestCase):\n def test_binomial_logistic_regression_with_bound(self):\n\n df = self.spark.createDataFrame(\n [\n (1.0, 1.0, Vectors.dense(0.0, 5.0)),\n (0.0, 2.0, Vectors.dense(1.0, 2.0)),\n (1.0, 3.0, Vectors.dense(2.0, 1.0)),\n (0.0, 4.0, Vectors.dense(3.0, 3.0)),\n ],\n [\"label\", \"weight\", \"features\"],\n )\n\n lor = LogisticRegression(\n regParam=0.01,\n weightCol=\"weight\",\n lowerBoundsOnCoefficients=Matrices.dense(1, 2, [-1.0, -1.0]),\n upperBoundsOnIntercepts=Vectors.dense(0.0),\n )\n model = lor.fit(df)\n self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.2944, -0.0484], atol=1e-4))\n self.assertTrue(np.isclose(model.intercept, 0.0, atol=1e-4))\n\n def test_multinomial_logistic_regression_with_bound(self):\n\n data_path = \"data/mllib/sample_multiclass_classification_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n lor = LogisticRegression(\n regParam=0.01,\n lowerBoundsOnCoefficients=Matrices.dense(3, 4, range(12)),\n upperBoundsOnIntercepts=Vectors.dense(0.0, 0.0, 0.0),\n )\n model = lor.fit(df)\n expected = [\n [4.593, 4.5516, 9.0099, 12.2904],\n [1.0, 8.1093, 7.0, 10.0],\n [3.041, 5.0, 8.0, 11.0],\n ]\n for i in range(0, len(expected)):\n self.assertTrue(\n np.allclose(model.coefficientMatrix.toArray()[i], expected[i], atol=1e-4)\n )\n self.assertTrue(\n np.allclose(model.interceptVector.toArray(), [-0.9057, -1.1392, -0.0033], atol=1e-4)\n )\n\n\nclass MultilayerPerceptronClassifierTest(SparkSessionTestCase):\n def test_raw_and_probability_prediction(self):\n\n data_path = \"data/mllib/sample_multiclass_classification_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n mlp = MultilayerPerceptronClassifier(\n maxIter=100, layers=[4, 5, 4, 3], blockSize=128, seed=123\n )\n model = mlp.fit(df)\n test = self.sc.parallelize([Row(features=Vectors.dense(0.1, 0.1, 0.25, 0.25))]).toDF()\n result = model.transform(test).head()\n expected_prediction = 2.0\n expected_probability = [0.0, 0.0, 1.0]\n expected_rawPrediction = [-11.6081922998, -8.15827998691, 22.17757045]\n 
self.assertTrue(result.prediction, expected_prediction)\n self.assertTrue(np.allclose(result.probability, expected_probability, atol=1e-4))\n # Use `assert_allclose` to show the value of `result.rawPrediction` in the assertion error\n # message\n np.testing.assert_allclose(\n result.rawPrediction,\n expected_rawPrediction,\n rtol=0.15,\n # Use the same default value as `np.allclose`\n atol=1e-08,\n )\n\n\nclass OneVsRestTests(SparkSessionTestCase):\n def test_copy(self):\n df = self.spark.createDataFrame(\n [\n (0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5)),\n ],\n [\"label\", \"features\"],\n )\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr)\n ovr1 = ovr.copy({lr.maxIter: 10})\n self.assertEqual(ovr.getClassifier().getMaxIter(), 5)\n self.assertEqual(ovr1.getClassifier().getMaxIter(), 10)\n model = ovr.fit(df)\n model1 = model.copy({model.predictionCol: \"indexed\"})\n self.assertEqual(model1.getPredictionCol(), \"indexed\")\n\n def test_output_columns(self):\n df = self.spark.createDataFrame(\n [\n (0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5)),\n ],\n [\"label\", \"features\"],\n )\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, parallelism=1)\n model = ovr.fit(df)\n output = model.transform(df)\n self.assertEqual(output.columns, [\"label\", \"features\", \"rawPrediction\", \"prediction\"])\n\n def test_raw_prediction_column_is_of_vector_type(self):\n # SPARK-35142: `OneVsRestModel` outputs raw prediction as a string column\n df = self.spark.createDataFrame(\n [\n (0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5)),\n ],\n [\"label\", \"features\"],\n )\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, parallelism=1)\n model = ovr.fit(df)\n row = model.transform(df).head()\n self.assertIsInstance(row[\"rawPrediction\"], DenseVector)\n\n def test_parallelism_does_not_change_output(self):\n df = self.spark.createDataFrame(\n [\n (0.0, Vectors.dense(1.0, 0.8)),\n (1.0, Vectors.sparse(2, [], [])),\n (2.0, Vectors.dense(0.5, 0.5)),\n ],\n [\"label\", \"features\"],\n )\n ovrPar1 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=0.01), parallelism=1)\n modelPar1 = ovrPar1.fit(df)\n ovrPar2 = OneVsRest(classifier=LogisticRegression(maxIter=5, regParam=0.01), parallelism=2)\n modelPar2 = ovrPar2.fit(df)\n for i, model in enumerate(modelPar1.models):\n self.assertTrue(\n np.allclose(\n model.coefficients.toArray(),\n modelPar2.models[i].coefficients.toArray(),\n atol=1e-4,\n )\n )\n self.assertTrue(np.allclose(model.intercept, modelPar2.models[i].intercept, atol=1e-4))\n\n def test_support_for_weightCol(self):\n df = self.spark.createDataFrame(\n [\n (0.0, Vectors.dense(1.0, 0.8), 1.0),\n (1.0, Vectors.sparse(2, [], []), 1.0),\n (2.0, Vectors.dense(0.5, 0.5), 1.0),\n ],\n [\"label\", \"features\", \"weight\"],\n )\n # classifier inherits hasWeightCol\n lr = LogisticRegression(maxIter=5, regParam=0.01)\n ovr = OneVsRest(classifier=lr, weightCol=\"weight\")\n self.assertIsNotNone(ovr.fit(df))\n # classifier doesn't inherit hasWeightCol\n dt = FMClassifier()\n ovr2 = OneVsRest(classifier=dt, weightCol=\"weight\")\n self.assertIsNotNone(ovr2.fit(df))\n\n\nclass KMeansTests(SparkSessionTestCase):\n def test_kmeans_cosine_distance(self):\n data = [\n (Vectors.dense([1.0, 1.0]),),\n (Vectors.dense([10.0, 10.0]),),\n 
(Vectors.dense([1.0, 0.5]),),\n (Vectors.dense([10.0, 4.4]),),\n (Vectors.dense([-1.0, 1.0]),),\n (Vectors.dense([-100.0, 90.0]),),\n ]\n df = self.spark.createDataFrame(data, [\"features\"])\n kmeans = KMeans(k=3, seed=1, distanceMeasure=\"cosine\")\n model = kmeans.fit(df)\n result = model.transform(df).collect()\n self.assertTrue(result[0].prediction == result[1].prediction)\n self.assertTrue(result[2].prediction == result[3].prediction)\n self.assertTrue(result[4].prediction == result[5].prediction)\n\n\nclass LDATest(SparkSessionTestCase):\n def _compare(self, m1, m2):\n \"\"\"\n Temp method for comparing instances.\n TODO: Replace with generic implementation once SPARK-14706 is merged.\n \"\"\"\n self.assertEqual(m1.uid, m2.uid)\n self.assertEqual(type(m1), type(m2))\n self.assertEqual(len(m1.params), len(m2.params))\n for p in m1.params:\n if m1.isDefined(p):\n self.assertEqual(m1.getOrDefault(p), m2.getOrDefault(p))\n self.assertEqual(p.parent, m2.getParam(p.name).parent)\n if isinstance(m1, LDAModel):\n self.assertEqual(m1.vocabSize(), m2.vocabSize())\n self.assertEqual(m1.topicsMatrix(), m2.topicsMatrix())\n\n def test_persistence(self):\n # Test save/load for LDA, LocalLDAModel, DistributedLDAModel.\n df = self.spark.createDataFrame(\n [\n [1, Vectors.dense([0.0, 1.0])],\n [2, Vectors.sparse(2, {0: 1.0})],\n ],\n [\"id\", \"features\"],\n )\n # Fit model\n lda = LDA(k=2, seed=1, optimizer=\"em\")\n distributedModel = lda.fit(df)\n self.assertTrue(distributedModel.isDistributed())\n localModel = distributedModel.toLocal()\n self.assertFalse(localModel.isDistributed())\n # Define paths\n path = tempfile.mkdtemp()\n lda_path = path + \"/lda\"\n dist_model_path = path + \"/distLDAModel\"\n local_model_path = path + \"/localLDAModel\"\n # Test LDA\n lda.save(lda_path)\n lda2 = LDA.load(lda_path)\n self._compare(lda, lda2)\n # Test DistributedLDAModel\n distributedModel.save(dist_model_path)\n distributedModel2 = DistributedLDAModel.load(dist_model_path)\n self._compare(distributedModel, distributedModel2)\n # Test LocalLDAModel\n localModel.save(local_model_path)\n localModel2 = LocalLDAModel.load(local_model_path)\n self._compare(localModel, localModel2)\n # Clean up\n try:\n rmtree(path)\n except OSError:\n pass\n\n\nclass FPGrowthTests(SparkSessionTestCase):\n def setUp(self):\n super(FPGrowthTests, self).setUp()\n self.data = self.spark.createDataFrame(\n [([1, 2],), ([1, 2],), ([1, 2, 3],), ([1, 3],)], [\"items\"]\n )\n\n def test_association_rules(self):\n fp = FPGrowth()\n fpm = fp.fit(self.data)\n\n expected_association_rules = self.spark.createDataFrame(\n [([3], [1], 1.0, 1.0, 0.5), ([2], [1], 1.0, 1.0, 0.75)],\n [\"antecedent\", \"consequent\", \"confidence\", \"lift\", \"support\"],\n )\n actual_association_rules = fpm.associationRules\n\n self.assertEqual(actual_association_rules.subtract(expected_association_rules).count(), 0)\n self.assertEqual(expected_association_rules.subtract(actual_association_rules).count(), 0)\n\n def test_freq_itemsets(self):\n fp = FPGrowth()\n fpm = fp.fit(self.data)\n\n expected_freq_itemsets = self.spark.createDataFrame(\n [([1], 4), ([2], 3), ([2, 1], 3), ([3], 2), ([3, 1], 2)], [\"items\", \"freq\"]\n )\n actual_freq_itemsets = fpm.freqItemsets\n\n self.assertEqual(actual_freq_itemsets.subtract(expected_freq_itemsets).count(), 0)\n self.assertEqual(expected_freq_itemsets.subtract(actual_freq_itemsets).count(), 0)\n\n def tearDown(self):\n del self.data\n\n\nclass ALSTest(SparkSessionTestCase):\n def test_storage_levels(self):\n df = 
self.spark.createDataFrame(\n [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],\n [\"user\", \"item\", \"rating\"],\n )\n als = ALS().setMaxIter(1).setRank(1)\n # test default params\n als.fit(df)\n self.assertEqual(als.getIntermediateStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als._java_obj.getIntermediateStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als.getFinalStorageLevel(), \"MEMORY_AND_DISK\")\n self.assertEqual(als._java_obj.getFinalStorageLevel(), \"MEMORY_AND_DISK\")\n # test non-default params\n als.setIntermediateStorageLevel(\"MEMORY_ONLY_2\")\n als.setFinalStorageLevel(\"DISK_ONLY\")\n als.fit(df)\n self.assertEqual(als.getIntermediateStorageLevel(), \"MEMORY_ONLY_2\")\n self.assertEqual(als._java_obj.getIntermediateStorageLevel(), \"MEMORY_ONLY_2\")\n self.assertEqual(als.getFinalStorageLevel(), \"DISK_ONLY\")\n self.assertEqual(als._java_obj.getFinalStorageLevel(), \"DISK_ONLY\")\n\n\nclass GeneralizedLinearRegressionTest(SparkSessionTestCase):\n def test_tweedie_distribution(self):\n\n df = self.spark.createDataFrame(\n [\n (1.0, Vectors.dense(0.0, 0.0)),\n (1.0, Vectors.dense(1.0, 2.0)),\n (2.0, Vectors.dense(0.0, 0.0)),\n (2.0, Vectors.dense(1.0, 1.0)),\n ],\n [\"label\", \"features\"],\n )\n\n glr = GeneralizedLinearRegression(family=\"tweedie\", variancePower=1.6)\n model = glr.fit(df)\n self.assertTrue(np.allclose(model.coefficients.toArray(), [-0.4645, 0.3402], atol=1e-4))\n self.assertTrue(np.isclose(model.intercept, 0.7841, atol=1e-4))\n\n model2 = glr.setLinkPower(-1.0).fit(df)\n self.assertTrue(np.allclose(model2.coefficients.toArray(), [-0.6667, 0.5], atol=1e-4))\n self.assertTrue(np.isclose(model2.intercept, 0.6667, atol=1e-4))\n\n def test_offset(self):\n\n df = self.spark.createDataFrame(\n [\n (0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),\n (0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),\n (0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),\n (0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0)),\n ],\n [\"label\", \"weight\", \"offset\", \"features\"],\n )\n\n glr = GeneralizedLinearRegression(family=\"poisson\", weightCol=\"weight\", offsetCol=\"offset\")\n model = glr.fit(df)\n self.assertTrue(\n np.allclose(model.coefficients.toArray(), [0.664647, -0.3192581], atol=1e-4)\n )\n self.assertTrue(np.isclose(model.intercept, -1.561613, atol=1e-4))\n\n\nclass LinearRegressionTest(SparkSessionTestCase):\n def test_linear_regression_with_huber_loss(self):\n\n data_path = \"data/mllib/sample_linear_regression_data.txt\"\n df = self.spark.read.format(\"libsvm\").load(data_path)\n\n lir = LinearRegression(loss=\"huber\", epsilon=2.0)\n model = lir.fit(df)\n\n expectedCoefficients = [\n 0.136,\n 0.7648,\n -0.7761,\n 2.4236,\n 0.537,\n 1.2612,\n -0.333,\n -0.5694,\n -0.6311,\n 0.6053,\n ]\n expectedIntercept = 0.1607\n expectedScale = 9.758\n\n self.assertTrue(np.allclose(model.coefficients.toArray(), expectedCoefficients, atol=1e-3))\n self.assertTrue(np.isclose(model.intercept, expectedIntercept, atol=1e-3))\n self.assertTrue(np.isclose(model.scale, expectedScale, atol=1e-3))\n\n\nif __name__ == \"__main__\":\n from pyspark.ml.tests.test_algorithms import * # noqa: F401\n\n try:\n import xmlrunner # type: ignore[import]\n\n testRunner = xmlrunner.XMLTestRunner(output=\"target/test-reports\", verbosity=2)\n except ImportError:\n testRunner = None\n unittest.main(testRunner=testRunner, verbosity=2)\n"
] |
[
[
"numpy.isclose",
"numpy.allclose",
"numpy.testing.assert_allclose"
]
] |
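Illustrative sketch, not part of the dataset record above: the PySpark ML tests in this row lean on three NumPy tolerance checks (`np.allclose`, `np.isclose`, `np.testing.assert_allclose`). The values below are made up for illustration; only the comparison pattern comes from the record.

```python
# Sketch of the tolerance-based assertion pattern used in the tests above.
# The arrays are invented; only the NumPy calls mirror the record.
import numpy as np

coefficients = np.array([-0.29441, -0.04840])
expected = [-0.2944, -0.0484]

assert np.allclose(coefficients, expected, atol=1e-4)  # element-wise array check
assert np.isclose(0.78412, 0.7841, atol=1e-4)          # scalar check

# assert_allclose raises with a readable diff on failure, which is why the MLP
# test above prefers it for the rawPrediction comparison.
np.testing.assert_allclose(coefficients, expected, rtol=0.15, atol=1e-08)
```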
RaphaelBajon/argopy
|
[
"5e8762c0e5e35f8afb70fcde996b024e6631edb1"
] |
[
"argopy/fetchers.py"
] |
[
"#!/bin/env python\n# -*coding: UTF-8 -*-\n\"\"\"\n\nHigh level helper methods to load Argo data from any source\nThe facade should be able to work with all available data access point,\n\nValidity of access points parameters (eg: wmo) is made here, not at the data/index source fetcher level\n\n\"\"\"\n\nimport warnings\nimport xarray as xr\nimport pandas as pd\nimport numpy as np\nimport logging\n\nfrom argopy.options import OPTIONS, _VALIDATORS\nfrom .errors import InvalidFetcherAccessPoint, InvalidFetcher\nfrom .utilities import list_available_data_src, list_available_index_src, is_box, is_indexbox, check_wmo\nfrom .plotters import plot_trajectory, bar_plot, open_sat_altim_report\n\n\nAVAILABLE_DATA_SOURCES = list_available_data_src()\nAVAILABLE_INDEX_SOURCES = list_available_index_src()\n\nlog = logging.getLogger(\"argopy.fetchers.facade\")\n\n\ndef checkAccessPoint(AccessPoint):\n \"\"\"Decorator to validate fetcher access points of a given data source.\n\n This decorator will check if an access point (eg: 'profile') is available for the\n data source (eg: 'erddap') used to initiate the checker. If not, an error is raised.\n\n #todo Make sure this decorator preserves the doc string !\n \"\"\"\n def wrapper(*args):\n if AccessPoint.__name__ not in args[0].valid_access_points:\n raise InvalidFetcherAccessPoint(\n \"'%s' not available with '%s' src. Available access point(s): %s\" %\n (AccessPoint.__name__, args[0]._src, \", \".join(args[0].Fetchers.keys()))\n )\n return AccessPoint(*args)\n wrapper.__name__ = AccessPoint.__name__\n wrapper.__doc__ = AccessPoint.__doc__\n return wrapper\n\n\nclass ArgoDataFetcher:\n \"\"\" Fetcher and post-processor of Argo data (API facade)\n\n Parameters\n ----------\n mode: str, optional\n User mode. Eg: ``standard`` or ``expert``. Set to OPTIONS['mode'] by default if empty.\n src: str, optional\n Source of the data to use. Eg: ``erddap``. Set to OPTIONS['src'] by default if empty.\n ds: str, optional\n Name of the dataset to load. Eg: ``phy``. Set to OPTIONS['dataset'] by default if empty.\n **fetcher_kwargs: optional\n Additional arguments passed on data source fetcher creation of each access points.\n\n Examples\n --------\n >>> from argopy import DataFetcher\n >>> adf = DataFetcher.region([-75, -65, 10, 20]).load()\n >>> idx.plot()\n >>> idx.data\n\n \"\"\"\n\n def __init__(self, mode: str = \"\", src: str = \"\", ds: str = \"\", **fetcher_kwargs):\n\n \"\"\" Create a fetcher instance\n\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoDataFetcher`\n \"\"\"\n\n # Facade options:\n self._mode = OPTIONS[\"mode\"] if mode == \"\" else mode\n self._dataset_id = OPTIONS[\"dataset\"] if ds == \"\" else ds\n self._src = OPTIONS[\"src\"] if src == \"\" else src\n\n _VALIDATORS[\"mode\"](self._mode)\n _VALIDATORS[\"src\"](self._src)\n _VALIDATORS[\"dataset\"](self._dataset_id)\n\n # Load data source access points:\n if self._src not in AVAILABLE_DATA_SOURCES:\n raise InvalidFetcher(\n \"Requested data fetcher '%s' not available ! 
Please try again with any of: %s\"\n % (self._src, \"\\n\".join(AVAILABLE_DATA_SOURCES))\n )\n else:\n Fetchers = AVAILABLE_DATA_SOURCES[self._src]\n\n # Auto-discovery of access points for this fetcher:\n # rq: Access point names for the facade are not the same as the access point of fetchers\n self.Fetchers = {}\n self.valid_access_points = []\n for p in Fetchers.access_points:\n if p == \"box\": # Required for 'region'\n self.Fetchers[\"region\"] = Fetchers.Fetch_box\n self.valid_access_points.append(\"region\")\n if p == \"wmo\": # Required for 'profile' and 'float'\n self.Fetchers[\"float\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"float\")\n self.Fetchers[\"profile\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"profile\")\n\n # Init sub-methods:\n self.fetcher = None\n if self._dataset_id not in Fetchers.dataset_ids:\n raise ValueError(\n \"%s dataset is not available for this data source (%s)\"\n % (self._dataset_id, self._src)\n )\n self.fetcher_kwargs = {**fetcher_kwargs}\n self.fetcher_options = {**{\"ds\": self._dataset_id}, **fetcher_kwargs}\n self.postproccessor = self.__empty_processor\n self._AccessPoint = None\n\n # Init data structure holders:\n self._index = None\n self._data = None\n\n # More init:\n self._loaded = False\n self._request = \"\"\n\n # Dev warnings\n # Todo Clean-up before each release\n if self._dataset_id == \"bgc\" and self._mode == \"standard\":\n warnings.warn(\n \"'BGC' dataset fetching in 'standard' user mode is not yet reliable. \"\n \"Try to switch to 'expert' mode if you encounter errors.\"\n )\n\n def __repr__(self):\n if self.fetcher:\n summary = [self.fetcher.__repr__()]\n if \"parallel\" in self.fetcher_options:\n summary.append(\n \"Backend: %s (parallel=%s)\"\n % (self._src, str(self.fetcher_options[\"parallel\"]))\n )\n else:\n summary.append(\"Backend: %s\" % self._src)\n else:\n summary = [\"<datafetcher.%s> 'No access point initialised'\" % self._src]\n summary.append(\"Available access points: %s\" % \", \".join(self.Fetchers.keys()))\n if \"parallel\" in self.fetcher_options:\n summary.append(\"Backend: %s (parallel=%s)\" % (self._src, str(self.fetcher_options[\"parallel\"])))\n else:\n summary.append(\"Backend: %s\" % self._src)\n\n summary.append(\"User mode: %s\" % self._mode)\n summary.append(\"Dataset: %s\" % self._dataset_id)\n return \"\\n\".join(summary)\n\n def __empty_processor(self, xds):\n \"\"\" Do nothing to a dataset \"\"\"\n return xds\n\n def __getattr__(self, key):\n \"\"\" Validate access points \"\"\"\n valid_attrs = [\n \"Fetchers\",\n \"fetcher\",\n \"fetcher_options\",\n \"postproccessor\",\n \"data\",\n \"index\",\n \"domain\",\n \"_loaded\",\n \"_request\"\n ]\n if key not in self.valid_access_points and key not in valid_attrs:\n raise InvalidFetcherAccessPoint(\"'%s' is not a valid access point\" % key)\n pass\n\n @property\n def uri(self):\n \"\"\" List of resources to load for a request\n\n This can be a list of paths or urls, depending on the data source selected.\n\n Returns\n -------\n list(str)\n List of resources used to fetch data\n \"\"\"\n if self.fetcher:\n return self.fetcher.uri\n else:\n raise InvalidFetcherAccessPoint(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n\n @property\n def data(self):\n \"\"\" Data structure\n\n Returns\n --------\n :class:`xarray.DataArray`\n Fetched data\n \"\"\"\n if not isinstance(self._data, xr.Dataset):\n self.load()\n return self._data\n\n @property\n def index(self):\n \"\"\" Index 
structure, as returned by the to_index method\n\n Returns\n --------\n :class:`pandas.DataFrame`\n Argo-like index of fetched data\n \"\"\"\n if not isinstance(self._index, pd.core.frame.DataFrame):\n self.load()\n return self._index\n\n @property\n def domain(self):\n \"\"\"\" Domain of the dataset\n\n This is different from a usual ``box`` because dates are already in numpy.datetime64 format.\n \"\"\"\n this_ds = self.data\n if 'PRES_ADJUSTED' in this_ds.data_vars:\n Pmin = np.nanmin((np.min(this_ds['PRES'].values), np.min(this_ds['PRES_ADJUSTED'].values)))\n Pmax = np.nanmax((np.max(this_ds['PRES'].values), np.max(this_ds['PRES_ADJUSTED'].values)))\n else:\n Pmin = np.min(this_ds['PRES'].values)\n Pmax = np.max(this_ds['PRES'].values)\n\n return [np.min(this_ds['LONGITUDE'].values), np.max(this_ds['LONGITUDE'].values),\n np.min(this_ds['LATITUDE'].values), np.max(this_ds['LATITUDE'].values),\n Pmin, Pmax,\n np.min(this_ds['TIME'].values), np.max(this_ds['TIME'].values)]\n\n def dashboard(self, **kw):\n \"\"\" Open access point dashboard \"\"\"\n try:\n return self.fetcher.dashboard(**kw)\n except Exception:\n warnings.warn(\n \"dashboard not available for this fetcher access point (%s/%s)\"\n % (self._src, self._AccessPoint)\n )\n\n @checkAccessPoint\n def float(self, wmo, **kw):\n \"\"\" Float data fetcher\n\n Parameters\n ----------\n wmo: int, list(int)\n Define the list of Argo floats to load data for. This is a list of integers with WMO float identifiers.\n WMO is the World Meteorological Organization.\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoDataFetcher.float`\n A data source fetcher for all float profiles\n \"\"\"\n wmo = check_wmo(wmo) # Check and return a valid list of WMOs\n if \"CYC\" in kw or \"cyc\" in kw:\n raise TypeError(\n \"float() got an unexpected keyword argument 'cyc'. Use 'profile' access \"\n \"point to fetch specific profile data.\"\n )\n\n self.fetcher = self.Fetchers[\"float\"](WMO=wmo, **self.fetcher_options)\n self._AccessPoint = \"float\" # Register the requested access point\n self._AccessPoint_data = {'wmo': wmo} # Register the requested access point data\n\n if self._mode == \"standard\" and self._dataset_id != \"ref\":\n def postprocessing(xds):\n xds = self.fetcher.filter_data_mode(xds)\n xds = self.fetcher.filter_qc(xds)\n xds = self.fetcher.filter_variables(xds, self._mode)\n return xds\n\n self.postproccessor = postprocessing\n\n return self\n\n @checkAccessPoint\n def profile(self, wmo, cyc):\n \"\"\" Profile data fetcher\n\n Parameters\n ----------\n wmo: int, list(int)\n Define the list of Argo floats to load data for. 
This is a list of integers with WMO float identifiers.\n WMO is the World Meteorological Organization.\n cyc: list(int)\n Define the list of cycle numbers to load for each Argo floats listed in ``wmo``.\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoDataFetcher.profile`\n A data source fetcher for specific float profiles\n \"\"\"\n wmo = check_wmo(wmo) # Check and return a valid list of WMOs\n self.fetcher = self.Fetchers[\"profile\"](WMO=wmo, CYC=cyc, **self.fetcher_options)\n self._AccessPoint = \"profile\" # Register the requested access point\n self._AccessPoint_data = {'wmo': wmo, 'cyc': cyc} # Register the requested access point data\n\n if self._mode == \"standard\" and self._dataset_id != \"ref\":\n def postprocessing(xds):\n xds = self.fetcher.filter_data_mode(xds)\n xds = self.fetcher.filter_qc(xds)\n xds = self.fetcher.filter_variables(xds, self._mode)\n return xds\n self.postproccessor = postprocessing\n\n return self\n\n @checkAccessPoint\n def region(self, box: list):\n \"\"\" Space/time domain data fetcher\n\n Parameters\n ----------\n box: list()\n Define the domain to load Argo data for. The box list is made of:\n - lon_min: float, lon_max: float,\n - lat_min: float, lat_max: float,\n - dpt_min: float, dpt_max: float,\n - date_min: str (optional), date_max: str (optional)\n\n Longitude, latitude and pressure bounds are required, while the two bounding dates are optional.\n If bounding dates are not specified, the entire time series is fetched.\n Eg: [-60, -55, 40., 45., 0., 10., '2007-08-01', '2007-09-01']\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoDataFetcher`\n A data source fetcher for a space/time domain\n \"\"\"\n is_box(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n self._AccessPoint_data = {'box': box} # Register the requested access point data\n\n if self._mode == \"standard\" and self._dataset_id != \"ref\":\n def postprocessing(xds):\n xds = self.fetcher.filter_data_mode(xds)\n xds = self.fetcher.filter_qc(xds)\n xds = self.fetcher.filter_variables(xds, self._mode)\n return xds\n self.postproccessor = postprocessing\n\n return self\n\n def to_xarray(self, **kwargs):\n \"\"\" Fetch and return data as xarray.DataSet\n\n Trigger a fetch of data by the specified source and access point.\n\n Returns\n -------\n :class:`xarray.DataSet`\n Fetched data\n \"\"\"\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n xds = self.fetcher.to_xarray(**kwargs)\n xds = self.postproccessor(xds)\n return xds\n\n def to_dataframe(self, **kwargs):\n \"\"\" Fetch and return data as pandas.Dataframe\n\n Trigger a fetch of data by the specified source and access point.\n\n Returns\n -------\n :class:`pandas.DataFrame`\n Fetched data\n \"\"\"\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.load().data.to_dataframe(**kwargs)\n\n def to_index(self, full: bool = False):\n \"\"\" Create an index of Argo data, fetch data if necessary\n\n Build an Argo-like index of profiles from fetched data.\n\n Parameters\n ----------\n full: bool\n Should extract a full index, as returned by an IndexFetcher or only a space/time\n index of fetched profiles (this is the default choice, i.e. 
full=False).\n\n Returns\n -------\n :class:`pandas.DataFrame`\n Argo-like index of fetched data\n \"\"\"\n if not full:\n self.load()\n ds = self.data.argo.point2profile()\n df = (\n ds.drop_vars(set(ds.data_vars) - set([\"PLATFORM_NUMBER\"]))\n .drop_dims(\"N_LEVELS\")\n .to_dataframe()\n )\n df = (\n df.reset_index()\n .rename(\n columns={\n \"PLATFORM_NUMBER\": \"wmo\",\n \"LONGITUDE\": \"longitude\",\n \"LATITUDE\": \"latitude\",\n \"TIME\": \"date\",\n }\n )\n .drop(columns=\"N_PROF\")\n )\n df = df[[\"date\", \"latitude\", \"longitude\", \"wmo\"]]\n\n else:\n # Instantiate and load an IndexFetcher:\n index_loader = ArgoIndexFetcher(mode=self._mode,\n src=self._src,\n ds=self._dataset_id,\n **self.fetcher_kwargs)\n if self._AccessPoint == 'float':\n index_loader.float(self._AccessPoint_data['wmo']).load()\n if self._AccessPoint == 'profile':\n index_loader.profile(self._AccessPoint_data['wmo'], self._AccessPoint_data['cyc']).load()\n if self._AccessPoint == 'region':\n # Convert data box to index box (remove depth info):\n index_box = self._AccessPoint_data['box'].copy()\n del index_box[4:6]\n index_loader.region(index_box).load()\n df = index_loader.index\n\n if self._loaded and self._mode == 'standard' and len(self._index) != len(df):\n warnings.warn(\"Loading a full index in 'standard' user mode may lead to more profiles in the \"\n \"index than reported in data.\")\n\n # Possibly replace the light index with the full version:\n if not self._loaded or self._request == self.__repr__():\n self._index = df\n\n return df\n\n def load(self, force: bool = False, **kwargs):\n \"\"\" Fetch data (and compute an index) if not already in memory\n\n Apply the default to_xarray() and to_index() methods and store results in memory.\n Access loaded measurements structure with the `data` and `index` properties::\n\n ds = ArgoDataFetcher().profile(6902746, 34).load().data\n # or\n df = ArgoDataFetcher().float(6902746).load().index\n\n Parameters\n ----------\n force: bool\n Force fetching data if not already in memory, default is False.\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoDataFetcher.float`\n Data fetcher with `data` and `index` properties in memory\n \"\"\"\n # Force to load data if the fetcher definition has changed\n if self._loaded and self._request != self.__repr__():\n force = True\n\n if not self._loaded or force:\n # Fetch measurements:\n self._data = self.to_xarray(**kwargs)\n # Next 2 lines must come before ._index because to_index() calls back on .load() to read .data\n self._request = self.__repr__() # Save definition of loaded data\n self._loaded = True\n # Extract measurements index from data:\n self._index = self.to_index(full=False)\n return self\n\n def clear_cache(self):\n \"\"\" Clear data cached by fetcher \"\"\"\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.fetcher.clear_cache()\n\n def plot(self, ptype=\"trajectory\", **kwargs):\n \"\"\" Create custom plots from data\n\n Parameters\n ----------\n ptype: {'trajectory',' profiler', 'dac', 'qc_altimetry}, default: 'trajectory'\n\n Returns\n -------\n fig: :class:`matplotlib.figure.Figure`\n ax: :class:`matplotlib.axes.Axes`\n \"\"\"\n self.load()\n if ptype in [\"dac\", \"institution\"]:\n if \"institution\" not in self.index:\n self.to_index(full=True)\n return bar_plot(self.index, by=\"institution\", **kwargs)\n elif ptype == \"profiler\":\n if \"profiler\" not in self.index:\n self.to_index(full=True)\n return 
bar_plot(self.index, by=\"profiler\", **kwargs)\n elif ptype == \"trajectory\":\n return plot_trajectory(self.index, **kwargs)\n elif ptype == \"qc_altimetry\":\n WMOs = np.unique(self.data['PLATFORM_NUMBER'])\n return open_sat_altim_report(WMOs, **kwargs)\n else:\n raise ValueError(\n \"Type of plot unavailable. Use: 'trajectory', 'dac', 'profiler', 'qc_altimetry'\"\n )\n\n\nclass ArgoIndexFetcher:\n \"\"\" Fetcher and post-processor of Argo index data (API facade)\n\n Parameters\n ----------\n mode: str, optional\n User mode. Eg: ``standard`` or ``expert``. Set to OPTIONS['mode'] by default if empty.\n src: str, optional\n Source of the data to use. Eg: ``erddap``. Set to OPTIONS['src'] by default if empty.\n ds: str, optional\n Name of the dataset to load. Eg: ``phy``. Set to OPTIONS['dataset'] by default if empty.\n **fetcher_kwargs: optional\n Additional arguments passed on data source fetcher of each access points.\n\n Notes\n -----\n Spec discussions can be found here:\n https://github.com/euroargodev/argopy/issues/8\n\n https://github.com/euroargodev/argopy/pull/6\n\n Examples\n --------\n >>> from argopy import IndexFetcher\n >>> adf = IndexFetcher.region([-75, -65, 10, 20]).load()\n >>> idx.plot()\n >>> idx.index\n \"\"\"\n\n def __init__(self, mode: str = \"\", src: str = \"\", ds: str = \"\", **fetcher_kwargs):\n\n # Facade options:\n self._mode = OPTIONS[\"mode\"] if mode == \"\" else mode\n self._dataset_id = OPTIONS[\"dataset\"] if ds == \"\" else ds\n self._src = OPTIONS[\"src\"] if src == \"\" else src\n\n _VALIDATORS[\"mode\"](self._mode)\n _VALIDATORS[\"src\"](self._src)\n\n # Load data source access points:\n if self._src not in AVAILABLE_INDEX_SOURCES:\n raise InvalidFetcher(\n \"Requested index fetcher '%s' not available ! \"\n \"Please try again with any of: %s\"\n % (self._src, \"\\n\".join(AVAILABLE_INDEX_SOURCES))\n )\n else:\n Fetchers = AVAILABLE_INDEX_SOURCES[self._src]\n\n # Auto-discovery of access points for this fetcher:\n # rq: Access point names for the facade are not the same as the access point of fetchers\n self.Fetchers = {}\n self.valid_access_points = []\n for p in Fetchers.access_points:\n if p == \"box\": # Required for 'region'\n self.Fetchers[\"region\"] = Fetchers.Fetch_box\n self.valid_access_points.append(\"region\")\n if p == \"wmo\": # Required for 'profile' and 'float'\n self.Fetchers[\"float\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"float\")\n self.Fetchers[\"profile\"] = Fetchers.Fetch_wmo\n self.valid_access_points.append(\"profile\")\n\n # Init sub-methods:\n self.fetcher = None\n if self._dataset_id not in Fetchers.dataset_ids:\n raise ValueError(\n \"%s dataset is not available for this index source (%s)\"\n % (self._dataset_id, self._src)\n )\n self.fetcher_options = {**fetcher_kwargs}\n self.postproccessor = self.__empty_processor\n self._AccessPoint = None\n\n # Init data structure holders:\n self._index = None\n\n # More init:\n self._loaded = False\n self._request = \"\"\n\n def __repr__(self):\n if self.fetcher:\n summary = [self.fetcher.__repr__(),\n \"Backend: %s\" % self._src]\n else:\n summary = [\"<indexfetcher.%s> 'No access point initialised'\" % self._src,\n \"Available access points: %s\" % \", \".join(self.Fetchers.keys()),\n \"Backend: %s\" % self._src]\n\n summary.append(\"User mode: %s\" % self._mode)\n summary.append(\"Dataset: %s\" % self._dataset_id)\n return \"\\n\".join(summary)\n\n def __empty_processor(self, xds):\n \"\"\" Do nothing to a dataset \"\"\"\n return xds\n\n def 
__getattr__(self, key):\n \"\"\" Validate access points \"\"\"\n valid_attrs = [\n \"Fetchers\",\n \"fetcher\",\n \"fetcher_options\",\n \"postproccessor\",\n \"index\",\n \"_loaded\",\n ]\n if key not in self.valid_access_points and key not in valid_attrs:\n raise InvalidFetcherAccessPoint(\"'%s' is not a valid access point\" % key)\n pass\n\n @property\n def index(self):\n \"\"\" Index structure\n\n Returns\n --------\n :class:`pandas.DataFrame`\n Argo-like index of fetched data\n \"\"\"\n if not isinstance(self._index, pd.core.frame.DataFrame):\n self.load()\n return self._index\n\n @checkAccessPoint\n def float(self, wmo):\n \"\"\" Float index fetcher\n\n Parameters\n ----------\n wmo: list(int)\n Define the list of Argo floats to load data for. This is a list of integers with WMO numbers.\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoIndexFetcher`\n An index fetcher initialised for specific floats\n \"\"\"\n wmo = check_wmo(wmo) # Check and return a valid list of WMOs\n self.fetcher = self.Fetchers[\"float\"](WMO=wmo, **self.fetcher_options)\n self._AccessPoint = \"float\" # Register the requested access point\n return self\n\n @checkAccessPoint\n def profile(self, wmo, cyc):\n \"\"\" Profile index fetcher\n\n Parameters\n ----------\n wmo: int, list(int)\n Define the list of Argo floats to load index for. This is a list of integers with WMO float identifiers.\n WMO is the World Meteorological Organization.\n cyc: list(int)\n Define the list of cycle numbers to load for each Argo floats listed in ``wmo``.\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoIndexFetcher`\n An index fetcher initialised for specific float profiles\n \"\"\"\n wmo = check_wmo(wmo) # Check and return a valid list of WMOs\n self.fetcher = self.Fetchers[\"profile\"](WMO=wmo, CYC=cyc, **self.fetcher_options)\n self._AccessPoint = \"profile\" # Register the requested access point\n return self\n\n @checkAccessPoint\n def region(self, box):\n \"\"\" Space/time domain index fetcher\n\n Parameters\n ----------\n box: list()\n Define the domain to load Argo index for. 
The box list is made of:\n - lon_min: float, lon_max: float,\n - lat_min: float, lat_max: float,\n - date_min: str (optional), date_max: str (optional)\n\n Longitude and latitude bounds are required, while the two bounding dates are optional.\n If bounding dates are not specified, the entire time series is fetched.\n Eg: [-60, -55, 40., 45., '2007-08-01', '2007-09-01']\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoIndexFetcher`\n An index fetcher initialised for a space/time domain\n\n Warnings\n --------\n Note that the box option for an index fetcher does not have pressure bounds, contrary to the data fetcher.\n \"\"\"\n is_indexbox(box, errors=\"raise\") # Validate the box definition\n self.fetcher = self.Fetchers[\"region\"](box=box, **self.fetcher_options)\n self._AccessPoint = \"region\" # Register the requested access point\n return self\n\n def to_dataframe(self, **kwargs):\n \"\"\" Fetch and return index data as pandas Dataframe\n\n Returns\n -------\n :class:`pandas.DataFrame`\n \"\"\"\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.fetcher.to_dataframe(**kwargs)\n\n def to_xarray(self, **kwargs):\n \"\"\" Fetch and return index data as xarray DataSet\n\n This is a shortcut to .load().index.to_xarray()\n\n Returns\n -------\n :class:`xarray.DataSet`\n \"\"\"\n if self._AccessPoint not in self.valid_access_points:\n raise InvalidFetcherAccessPoint(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.load().index.to_xarray(**kwargs)\n\n def to_csv(self, file: str = \"output_file.csv\"):\n \"\"\" Fetch and save index data as csv in a file\n\n Notes\n -----\n >>> idx.to_csv()\n is a shortcut to:\n >>> idx.load().index.to_csv()\n\n Since the ``index`` property is a :class:`pandas.DataFrame`, this is currently a short\n cut to :meth:`pandas.DataFrame.to_index`\n\n Returns\n -------\n None\n \"\"\"\n if self._AccessPoint not in self.valid_access_points:\n raise InvalidFetcherAccessPoint(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.load().index.to_csv(file)\n\n def load(self, force: bool = False):\n \"\"\" Load index in memory\n\n Apply the default to_dataframe() method and store results in memory.\n Access loaded index structure with the `index` property::\n\n df = ArgoIndexFetcher().float(6902746).load().index\n\n Parameters\n ----------\n force: bool\n Force loading, default is False.\n\n Returns\n -------\n :class:`argopy.fetchers.ArgoIndexFetcher.float`\n Index fetcher with `index` property in memory\n \"\"\"\n # Force to load data if the fetcher definition has changed\n if self._loaded and self._request != self.__repr__():\n force = True\n\n if not self._loaded or force:\n self._index = self.to_dataframe()\n self._request = self.__repr__() # Save definition of loaded data\n self._loaded = True\n return self\n\n def plot(self, ptype=\"trajectory\", **kwargs):\n \"\"\" Create custom plots from index\n\n Parameters\n ----------\n ptype: {'trajectory',' profiler', 'dac', 'qc_altimetry}, default: 'trajectory'\n\n Returns\n -------\n fig: :class:`matplotlib.figure.Figure`\n ax: :class:`matplotlib.axes.Axes`\n \"\"\"\n self.load()\n if ptype in [\"dac\", \"institution\"]:\n return bar_plot(self.index, by=\"institution\", **kwargs)\n elif ptype == \"profiler\":\n return bar_plot(self.index, by=\"profiler\", **kwargs)\n elif ptype == \"trajectory\":\n return 
plot_trajectory(self.index.sort_values([\"file\"]), **kwargs)\n elif ptype == \"qc_altimetry\":\n WMOs = np.unique(self.index['wmo'])\n return open_sat_altim_report(WMOs, **kwargs)\n else:\n raise ValueError(\n \"Type of plot unavailable. Use: 'trajectory', 'dac', 'profiler', 'qc_altimetry'\"\n )\n\n def clear_cache(self):\n \"\"\" Clear fetcher cached data \"\"\"\n return self.fetcher.clear_cache()\n"
] |
[
[
"numpy.max",
"numpy.unique",
"numpy.min"
]
] |
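Illustrative sketch, not part of the dataset record above: a usage example of the argopy facade in this row, following the facade's own docstrings (`DataFetcher(...).region(box).load()` with the box format from the `region()` docstring). Fetching requires network access to an Argo data source, so treat this as a sketch rather than a runnable test.

```python
# Usage sketch based on the facade docstrings recorded above; needs network
# access to the selected data source, so it is illustrative only.
from argopy import DataFetcher

# Box from the region() docstring: lon/lat/pressure bounds plus optional dates.
box = [-60, -55, 40., 45., 0., 10., '2007-08-01', '2007-09-01']

adf = DataFetcher(src="erddap").region(box).load()
print(adf.data)    # xarray.Dataset of fetched measurements
print(adf.index)   # pandas.DataFrame, Argo-like index of the same profiles
fig, ax = adf.plot("trajectory")
```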
Lazersmoke/idawator-hacking
|
[
"12db250afa6f0192041a233339db535edbc72f86",
"12db250afa6f0192041a233339db535edbc72f86"
] |
[
"py/learning.py",
"py/parseMidi.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom mido import MidiFile, Message, MidiTrack\nfrom itertools import permutations\nimport os\n\nnp.set_printoptions(precision=2)\n\n# This is the size of the Fock space of simulatneous notes\n# The Fock space should be thought of as:\n# (note) `direct sum` (note X note) `direct sum` (note X note X note) ...\n# Up to the fockSize-fold cartesian product: \\prod_{i=1}^fockSize note_i\nfockSize = 10\n\n# A single note in the fock representation described above\n# 12 pitch classes, and 11 octaves, one-hot encoding\n# 11 octaves because MIDI comes in [0,127], last octave is [121,127]\nsingleNoteSize = 12 + 11\n\n# Compute some constants based on the fockSize and singleNoteSize\ntotalFockSize = 0\nfockOffsets = []\nfor i in range(fockSize):\n fockOffsets.append(totalFockSize)\n totalFockSize += (i + 1) * singleNoteSize\n# Include the ending offset; this is useful sometimes\nfockOffsets.append(totalFockSize)\n\n# totalFockSize is the dimension of the Fock space\nprint(\"Total fock size is {}\".format(totalFockSize))\n\n# fockOffsets holds the offsets to the different direct summands of the Fock space\n# The summand with k simultaneous notes is between fockOffsets[k-1] and fockOffsets[k]\nprint(\"Fock offsets are {}\".format(fockOffsets))\n\n# Add in the time density as an extra dimension\n# Time density is the amount of time that this particular chunk of\n# the MIDI takes up (in midi ticks). It's a density because the non-uniform\n# MIDI is being squished and stretched to be uniform (timeseries)\n# so we need to keep the density of that embedding around\n\n# NoteSpec = (Time part) `direct sum` (Fock space)\n# Note that it is not one-hot encoded\nnoteSpecSize = 1 + totalFockSize\n\n# Convert a midi note in [0,127] to a one-hot encoded\n# vector of size singleNoteSize\ndef midiNoteToSingleNoteChunk(midiNote):\n (octave,pitchClass) = divmod(midiNote,12)\n vec = np.zeros(singleNoteSize)\n vec[pitchClass] = 1\n vec[12 + octave] = 1\n return vec\n\n# Build a NoteSpec out of the current midi situation during this particular chunk of midi\ndef mkNoteSpec(heldNotes,decayingNotes,timeDensity):\n # We don't distinguish between notes that are being freshly played right now\n # and those which are now in the release stage of ADSR\n allNotes = heldNotes + decayingNotes\n fockVec = np.zeros(totalFockSize)\n # If there are more simultaneous notes than fockSize, we can't represent it\n # The current solution is to arbitrarily drop them by slicing the list\n # This isn't particularly stable due to heldNotes being in potentially arbitrary orders\n if len(allNotes) > fockSize:\n allNotes = allNotes[:fockSize]\n # This print statement will bottleneck your parsing in some cases\n #print(\"!!! 
Fock size of {} exceeded by {} simultaneous notes; dropping {} notes !!!\".format(fockSize,len(allNotes), len(allNotes) - fockSize))\n\n # Decide which direct summand of the fock space we fall under\n startIdx = fockOffsets[len(allNotes) - 1]\n # Place each currently playing note into the correct slot\n for i in range(len(allNotes)):\n fockVec[startIdx + i * singleNoteSize : startIdx + (i + 1) * singleNoteSize] = midiNoteToSingleNoteChunk(allNotes[i])\n # Include the time density in front of the fock vector to make this a NoteSpec\n return np.insert(fockVec,0,timeDensity)\n\n# Given a MIDI filename on disk, get the list of tracks\ndef getTracksFromMidi(filename,verbose=False):\n mid = MidiFile(filename)\n tracks = []\n for i, track in enumerate(mid.tracks):\n if verbose:\n print('Track {}: {}'.format(i, track.name))\n # The notes that are currently being played\n # This must be tracked between midi messages\n heldNotes = []\n # Notes that are turning off\n toUnHold = []\n lastTime = 0\n noteSpecs = []\n for msg in track:\n # MIDI typically issues several commands with time=0, and the last one with\n # time equal to the time until the next command in midi ticks\n # If the time isn't zero, then we have all the commands from this chunk\n # and it's time to write it out\n\n # On top of this, we treat anything short of 4 midi ticks as essentially happening in zero time.\n if msg.time > 3:\n # Cap the time density so that pathologically long\n # notes in the data don't destroy everything\n maxTimeDensity = 200\n\n if len(heldNotes) + len(toUnHold) > 0:\n noteSpec = mkNoteSpec(heldNotes,toUnHold,min(lastTime,maxTimeDensity))\n noteSpecs.append(noteSpec)\n\n # Formally advance the time to the next chunk\n lastTime = msg.time\n\n # All the notes that were turning off in the last chunk are now fully off\n for n in toUnHold:\n if n in heldNotes:\n heldNotes.remove(n)\n toUnHold = []\n\n # Adjust the note states based on the MIDI message\n if msg.type == 'note_on' and msg.note not in heldNotes:\n heldNotes.append(msg.note)\n elif msg.type == 'note_off' and msg.note in heldNotes:\n toUnHold.append(msg.note)\n\n if verbose:\n print(\"Found {} Note Specs\".format(len(noteSpecs)))\n\n # Too-short tracks are typically like effect tracks\n # or other weird things that we don't want to learn on\n minTrackSize = 200\n if(len(noteSpecs) >= minTrackSize):\n tracks.append(np.stack(noteSpecs,axis=0))\n elif verbose:\n print(\"Track Discarded XXX\")\n return tracks\n\n# Create a tensorflow dataset of prediction examples (ie, flashcards)\n# based on the given track (which is just a list of NoteSpec's)\ndef mkDatasetFromTrack(track):\n # The length in NoteSpec's of sequences that we should (a) learn on and (b) generate\n seq_length = 50\n\n # Chop up the input track into slices\n # Makes about len(track)/(seq_length + 1) examples out of the track\n sequences = tf.data.Dataset.from_tensor_slices(track).batch(seq_length+1, drop_remainder=True)\n\n # For each example we have:\n # input is [n , n + seq_length]\n # output is [1 + n,1 + n + seq_length]\n def split_input_target(chunk):\n inputNoteSeq = chunk[:-1]\n targetNoteSeq = chunk[1:]\n return inputNoteSeq, targetNoteSeq\n\n # Turn the database of slices into a database of examples\n dataset = sequences.map(split_input_target)\n return dataset\n\n# Get the database of examples for a midi file\ndef fileToData(filename):\n tracks = getTracksFromMidi(filename)\n dataset = None\n for track in tracks:\n if dataset is None:\n dataset = mkDatasetFromTrack(track)\n else:\n 
dataset = dataset.concatenate(mkDatasetFromTrack(track))\n return dataset\n\n# Use up to this many files from the \"midis\" folder\n# to build the database of examples\nmax_files = 50\ndataset = None\nmidiFiles = os.listdir(\"clean_midi\")\nnp.random.shuffle(midiFiles)\nfor filename in midiFiles:\n max_files -= 1\n if max_files < 0:\n break\n fullPath = os.path.join(\"clean_midi\",filename)\n if not os.path.isfile(fullPath):\n continue\n print(\"Using\",fullPath)\n try:\n fileSet = fileToData(fullPath)\n if dataset is None:\n dataset = fileSet\n elif fileSet is not None:\n dataset = dataset.concatenate(fileSet)\n # Some MIDI files have data values > 127, which the mido library doesn't like\n # so it throws these errors. We catch them and ignore the culprit file\n except ValueError as err:\n print(\"!!! ValueError dealing with midi file:\",fullPath)\n print(\"!!!\",err)\n except OSError as err:\n print(\"!!! OSError dealing with midi file:\",fullPath)\n print(\"!!!\",err)\n except:\n print(\"!!! Other error dealing with midi file:\",fullPath)\n\n# Peel off the validation set\nvalidationSet = dataset.shard(num_shards=2,index=1).batch(50)\ndataset = dataset.shard(num_shards=2,index=0)\n\n# Batch the dataset into chunks of 50 examples each to make training more managable\ndataset = dataset.shuffle(100).batch(50, drop_remainder=True)\n\n\n# Switch to use fewer than the full dataset for fast training\nmax_batches = 10\nif max_batches > 0:\n dataset = dataset.take(max_batches)\n\n# Our model!\nmodel = keras.Sequential()\n# Input is some sequence of NoteSpec's of unspecified length\nmodel.add(layers.InputLayer(input_shape=(None,noteSpecSize)))\n# Use a Bidirectional LSTM to remember states an all that good stuff\nmodel.add(layers.Bidirectional(layers.LSTM(512,return_sequences=True)))\n# The dropout layer prevents overfitting (via black magic)\nmodel.add(layers.Dropout(0.2))\n# A finalizing simple neural network layer using relu because relu is love relu is life\nmodel.add(layers.Dense(noteSpecSize,activation = 'relu'))\n\nmodel.summary()\n\n# Write out a sequence of NoteSpec's to MIDI (using maximum probabilities)\n# Technically this is wrong and we should sample instead, but this works OK\ndef writeBatch(batch,from_logits=True,title=\"batch\"):\n mid = MidiFile()\n track = MidiTrack()\n mid.tracks.append(track)\n # Set grand piano\n track.append(Message('program_change', program=1, time=0))\n # volume up\n track.append(Message('control_change', control=7, value=127, time=0))\n # sustain pedal??\n # The dataset MIDIs do this and it seems to help\n track.append(Message('control_change', control=64, value=127, time=0))\n\n # This is almost exactly the inverse of the MIDI parsing stuff\n heldNotes = []\n for i in range(batch.shape[0]):\n timeDensity = batch[i,0].numpy()\n fockNote = batch[i,1:]\n fockIdx = maxFockIdx(fockNote,from_logits)\n newNotes = []\n for j in range(fockIdx + 1):\n thisNote = fockNote[fockOffsets[fockIdx] + j * singleNoteSize : fockOffsets[fockIdx] + (j + 1) * singleNoteSize]\n pitchPart = thisNote[:12]\n octavePart = thisNote[12:]\n\n # Here's where we forget anything with less than maximal probability\n bestPitch = np.argmax(pitchPart)\n bestOctave = np.argmax(octavePart)\n\n chosenMidiNote = 12 * bestOctave + bestPitch\n if chosenMidiNote > 0:\n newNotes.append(chosenMidiNote)\n\n for oldNote in heldNotes[:]:\n if oldNote not in newNotes:\n heldNotes.remove(oldNote)\n track.append(Message('note_off', note=min(oldNote,127), velocity=127, time=0))\n else:\n 
newNotes.remove(oldNote)\n\n for newNote in newNotes:\n #print(\"Outputing note\",newNote)\n track.append(Message('note_on', note=min(newNote,127), velocity=64, time=0))\n heldNotes.append(newNote)\n\n track[-1].time = 16 * max(int(timeDensity),1)\n\n mid.save(title + '.mid')\n\n# Helper to extract the most likely fock index (= note count - 1) from a\n# fock probability vector that might be using logits\ndef maxFockIdx(fockNote, from_logits = True):\n probs = []\n for i in range(fockSize):\n if from_logits:\n probs.append(np.sum(tf.nn.softmax(fockNote[fockOffsets[i] : fockOffsets[i + 1]]).numpy()))\n else:\n probs.append(np.sum(fockNote[fockOffsets[i] : fockOffsets[i + 1]]))\n return np.argmax(probs)\n\n# Display a sequence of NoteSpec's by printing it out, graphing it in a scatter plot, and writing a file\ndef displayBatch(batch,from_logits=True,title=\"batch\"):\n print(\"Time densities:\")\n print(batch[:,0].numpy())\n if from_logits:\n probs = tf.nn.softmax(batch[:,1:]).numpy()\n else:\n # Add epsilon to not break the plot axis\n probs = batch[:,1:]\n plt.scatter(range(len(probs[-1])),probs[-1],label=title)\n plt.yscale('log')\n writeBatch(batch,from_logits)\n\n# Display some information about the model with\n# its current parameters. Shows an example from the dataset\n# and how well it is predicted by the model\ndef displayAbout(model,title=\"Model\"):\n for exampleInput, target in validationSet.take(1):\n predict = model(exampleInput)\n\n #print(\"Input:\")\n #displayBatch(exampleInput[0],from_logits=False,title=\"Input\")\n #print()\n\n print(\"Target:\")\n displayBatch(target[0],from_logits=False,title=\"Target\")\n print()\n\n print(\"Predictions:\")\n displayBatch(predict[0],from_logits=True,title=\"Predictions\")\n print()\n\n plt.title(title)\n plt.legend()\n plt.show(block=True)\n\n # Plot the distribution of time density predictions over the whole batch vs the target\n # They should match for good networks, and if it just learns the time density mean, that\n # tells you it's bad\n plt.title(\"Time density distribution comparison for \" + title)\n plt.hist([target[0][:,0],predict[0][:,0]],label=[\"Target time density distribution\",\"Predicted time density distribution\"])\n #plt.vlines([np.mean(input_example_batch[0][:,0])],label=\"Mean actual time density\")\n plt.legend()\n plt.show(block=True)\n\n# The big loss function\n# Everything in here operates on symbolic, differentiable tensors\n# So the code is quite constrained\n# It also takes in an entire batch at once, hence all the [:,:,x] stuff\ndef lossfn(actual, pred):\n\n # First, compute the squared error loss for the time density\n predTimeDensity = pred[:,:,0]\n actualTimeDensity = actual[:,:,0]\n timeLoss = (predTimeDensity - actualTimeDensity) * (predTimeDensity - actualTimeDensity)\n\n #timeShapeLoss = tf.keras.losses.kullback_leibler_divergence(actualTimeDensity,predTimeDensity)\n\n # Next, compute the categorical entropy stuff\n # Firstly, for the fock index, and secondly for the pitch class and octave therein\n fockProbsAct = []\n fockProbsPre = []\n cce = []\n for i in range(fockSize):\n startIdx = 1 + fockOffsets[i]\n endIdx = 1 + fockOffsets[i + 1]\n\n logSoftPred = -tf.keras.backend.log(tf.nn.softmax(pred[:,:, startIdx : endIdx]))\n\n fockProbsAct.append(tf.keras.backend.sum(actual[:,:,startIdx : endIdx],axis=2))\n fockProbsPre.append(tf.keras.backend.sum(logSoftPred,axis=2))\n\n chunkOnNotesAct = tf.stack(tf.split(actual[:,:,startIdx : endIdx],singleNoteSize,axis=2),axis=0)\n chunkOnNotesPred = 
tf.stack(tf.split(logSoftPred,singleNoteSize,axis=2),axis=0)\n\n symmedAct = tf.keras.backend.mean(chunkOnNotesAct,axis=0)\n symmedPred = tf.keras.backend.mean(chunkOnNotesPred,axis=0)\n\n # Hacky way to make it compute the p \\cdot log q stuff\n\n # Make a stack on axis=0 of [actual,log(softmax(pred))], then tf.prod along axis=0\n stacked = tf.stack([symmedAct, symmedPred],axis=0)\n cce.append(tf.keras.backend.sum(tf.keras.backend.prod(stacked,axis = 0),axis=2))\n\n fockProbsActTensor = tf.keras.backend.stack(fockProbsAct,axis=2)\n fockProbsPreTensor = tf.keras.backend.stack(fockProbsPre,axis=2)\n cceTensor = tf.keras.backend.stack(cce,axis=2)\n actualFockIdx = tf.math.argmax(fockProbsActTensor,axis=2)\n\n # This is the entropy from correctly categorizing the number of notes to play simultaneously\n fockIndexEntropy = -tf.math.log(tf.gather(fockProbsPreTensor,actualFockIdx,batch_dims=2))\n\n cceEntropy = tf.gather(cceTensor,actualFockIdx,batch_dims=2)\n\n alpha = 0.2\n return alpha * timeLoss + fockIndexEntropy + cceEntropy\n\nmodel.compile(optimizer='adam', loss=lossfn, metrics=['accuracy'])\n\n\n# Directory where the checkpoints will be saved\ncheckpoint_dir = './training_checkpoints'\n# Name of the checkpoint files\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\ncheckpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix,\n save_weights_only=True)\n\n# If the checkpoint directory exists, try to load weights from it\n# To not use the old checkpoint, just delete the directory\nif os.path.isdir(checkpoint_dir):\n model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))\n\ndef sampleForwardModel(model,n=50):\n for inp, tar in validationSet.take(1):\n x = inp\n for i in range(50):\n x = model(x)\n displayBatch(x)\n\ndisplayAbout(model,title=\"Existing model\")\nsel = input(\"Go?\")\n\nif len(sel) > 0 and sel[0] == \"g\":\n sampleForwardModel(model)\n\nif sel != \"y\":\n quit()\n\nmodel.fit(dataset, epochs=5, callbacks=[checkpoint_callback])\n\nprint(model.evaluate(validationSet))\ndisplayAbout(model,title=\"Newly trained model\")\n\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.io import wavfile\nfrom scipy.signal import hilbert\nfrom scipy.special import binom\nfrom mido import MidiFile\nimport itertools\n\nimport nonlin\n\nfockSize = 10\n# Pitch class, octave\nsizeOfNoteSpec = 12 + 1\nsizeOfFockNoteSpec = 0\nfockOffsets = []\nfor k in range(fockSize + 1):\n if k < fockSize:\n fockOffsets.append(sizeOfFockNoteSpec + sizeOfNoteSpec * k)\n sizeOfFockNoteSpec += sizeOfNoteSpec * k\n\nprint(\"Fock size total:\",sizeOfFockNoteSpec)\nprint(\"Fock offsets:\",fockOffsets)\n# Include time density!\nsizeEpoch = 1 + sizeOfFockNoteSpec\n\nohbMatrix = np.eye(12)\n\n# Build a NoteSpec out of the current midi situation during this particular epoch\ndef mkNoteSpec(heldNotes,decayingNotes,timeDensity):\n allNotes = heldNotes + decayingNotes\n noteCount = len(allNotes)\n if noteCount > fockSize:\n print(\"!!! Warning, fock size of {} exceeded by {} simultaneous notes !!!\".format(fockSize,noteCount))\n allNotes = allNotes[:fockSize]\n noteCount = fockSize\n fOff = fockOffsets[noteCount - 1]\n fockVec = np.zeros(sizeOfFockNoteSpec)\n\n contribs = []\n for k in range(noteCount):\n (octave,pc) = midiNoteToRepr(allNotes[k])\n pcVec = np.zeros(12)\n pcVec[pc] = 1\n nOff = fOff + k * sizeOfNoteSpec\n fockVec[nOff : nOff + sizeOfNoteSpec] = np.append(np.matmul(ohbMatrix,pcVec),octave)\n #print(pc)\n epoch = np.insert(fockVec,0,timeDensity)\n return epoch\n\ndef traceNoteSpec(ns):\n mess = \"Time Density: {}, note probabilites:\".format(ns[0])\n for k in range(fockSize):\n s = 1 + fockOffsets[k]\n fock = ns[s : s + (k + 1) * sizeOfNoteSpec]\n prob = np.linalg.norm(fock)\n if prob > 0:\n mess += \"\\n{:.2f} for {} notes (\".format(prob,k + 1)\n for l in range(k + 1):\n # minus one to forget octave\n noteStart = l * sizeOfNoteSpec\n thisNote = fock[noteStart : noteStart + sizeOfNoteSpec - 1]\n thisOctave = fock[noteStart + sizeOfNoteSpec - 1]\n mess += \"{}^{}, \".format(np.argwhere(thisNote).flatten(),thisOctave)\n mess = mess[:-2] + \")\"\n return mess\n\n# Midi should have octave in integers [0,10] (so eleven octaves)\n# Returns (octave,pitchClass)\ndef midiNoteToRepr(midiNote):\n return divmod(midiNote,12)\n\nmid = MidiFile('stayorgo.mid')\n\ntracks = []\nfor i, track in enumerate(mid.tracks):\n print('Track {}: {}'.format(i, track.name))\n heldNotes = []\n toUnHold = []\n lastTime = 0\n noteSpecs = []\n for msg in track:\n if msg.time != 0:\n #print()\n #print(\"Held\",heldNotes,\"with these ones decaying:\",toUnHold,\"for time:\",lastTime)\n #print()\n noteSpecs.append(mkNoteSpec(heldNotes,toUnHold,min(lastTime,300)))\n #print(traceNoteSpec(noteSpecs[-1]))\n for n in toUnHold:\n heldNotes.remove(n)\n toUnHold = []\n lastTime = msg.time\n if msg.type == 'note_on':\n #print(\"Note\",msg.note,\"on with time=\",msg.time)\n if msg.note not in heldNotes:\n heldNotes.append(msg.note)\n elif msg.type == 'note_off':\n #print(\"Note\",msg.note,\"off with time=\",msg.time)\n if msg.note in heldNotes:\n toUnHold.append(msg.note)\n else:\n #print(msg)\n x=1\n tracks.append(np.stack(noteSpecs,axis=0))\n print(\"Found {} Note Specs\\n\".format(len(noteSpecs)))\n\n\n# Memoryless predictor network\ndef stepPredictLoss(predictor,track):\n print(\"Finding step loss...\")\n totalLoss = 0\n lastNoteSpec = None\n for noteSpec in track:\n if lastNoteSpec is not None:\n predicted = nonlin.applyNonlinear(predictor,lastNoteSpec)\n totalLoss += np.linalg.norm(predicted - noteSpec)\n lastNoteSpec = noteSpec\n 
return totalLoss\n\ndef timePredictLoss(predictor,track):\n totalLoss = 0\n for k in range(track.shape[0] - predictorSideLength):\n i = k + predictorSideLength\n predicted = nonlin.applyNonlinear(predictor,track[k:i,0])[0]\n totalLoss += np.abs(predicted - track[i,0]) ** 2\n return np.log(totalLoss)\n\ndef plotTimePredictLoss(predictor,track):\n totalLoss = 0\n losses = []\n ks = range(track.shape[0] - predictorSideLength)\n for k in ks:\n i = k + predictorSideLength\n predicted = nonlin.applyNonlinear(predictor,track[k:i,0])[0]\n totalLoss += np.abs(predicted - track[i,0]) ** 2\n losses.append(totalLoss)\n return losses\n\n\n\npredictorDepth = 2\npredictorSideLength = 3\nidentityPredictor = nonlin.identityNonLin(sizeEpoch,predictorDepth)\nidentityTimePredictor = nonlin.identityNonLin(predictorSideLength,predictorDepth)\nprint(\"Computing identity time loss on Track 1...\")\nprint(\"Total Loss\",timePredictLoss(identityTimePredictor,tracks[1]))\n\ndef minCB(params):\n predictor = nonlin.deserializeNonLin(params,predictorSideLength,predictorDepth)\n print(predictor)\n for l in range(10):\n k = l + 200\n i = k + predictorSideLength\n predicted = nonlin.applyNonlinear(predictor,tracks[1][k:i,0])[0]\n print(\"Predicted: \",predicted)\n print(\"Actual: \",tracks[1][i,0])\n print(\"Loss: \",np.abs(predicted - tracks[1][i,0]) ** 2)\n print()\n print(\"Step Loss: \",timePredictLoss(predictor,tracks[1]))\n allLosses.append(plotTimePredictLoss(predictor,tracks[1]))\n\ndef toMinimize(params):\n predictor = nonlin.deserializeNonLin(params,predictorSideLength,predictorDepth)\n return timePredictLoss(predictor,tracks[1])\n\ninput(\"...\")\nallLosses = []\nminResult = minimize(toMinimize,nonlin.serializeNonLin(identityTimePredictor),callback=minCB,options={'disp':True,'maxiter':10})\nprint(minResult)\n\nfor i in range(len(allLosses)):\n losses = allLosses[i]\n plt.plot(range(len(losses)),losses, color = str(1-(i/len(allLosses))))\n\nplt.title('Cumulative loss during track, progressive iterations')\nplt.xlabel('Note in Track')\nplt.ylabel('Cumulative Loss')\nplt.show(block=True)\n\nplt.hist(np.diff(allLosses[-1]))\nplt.title('Distribution of losses')\nplt.show(block=True)\n"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.keras.backend.prod",
"tensorflow.stack",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.InputLayer",
"tensorflow.math.argmax",
"numpy.stack",
"tensorflow.gather",
"numpy.argmax",
"numpy.insert",
"numpy.zeros",
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.sum",
"tensorflow.split",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"numpy.sum",
"tensorflow.nn.softmax",
"tensorflow.train.latest_checkpoint",
"numpy.set_printoptions",
"tensorflow.data.Dataset.from_tensor_slices",
"matplotlib.pyplot.yscale",
"numpy.random.shuffle",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.backend.stack",
"tensorflow.keras.backend.mean",
"tensorflow.keras.layers.Dropout"
],
[
"numpy.log",
"numpy.abs",
"matplotlib.pyplot.title",
"numpy.eye",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.stack",
"numpy.argwhere",
"numpy.diff",
"numpy.insert",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] |
thbeutin/csep2
|
[
"79bc5c77c90f4dd13dae69425d3cb39aabcec7bf"
] |
[
"examples/catalog_interactions.py"
] |
[
"import time\nimport os\nimport numpy\nimport matplotlib.pyplot as pyplot\n\nfrom csep.core.catalogs import UCERF3Catalog, ComcatCatalog\nfrom csep.utils.plotting import plot_cumulative_events_versus_time, plot_magnitude_versus_time\n\n\"\"\"\nNote:\n This script requires about 12-14Gb of Ram because generators are not implemented for the plots.\n\"\"\"\n\n# UCERF3 Synthetics\nucerf3_numbers = []\nnofaults_numbers = []\nmin_magnitude = []\n\nproject_root = '/Users/wsavran/Projects/CSEP2/u3etas_simulations/landers_experiment'\nfilename = os.path.join(project_root, '10-23-2018_landers-pt1/results_complete.bin')\nfilename_nofaults = os.path.join(project_root, '10-31-2018_landers-nofaults-pt1/results_complete.bin')\n\nt0 = time.time()\nu3catalogs = UCERF3Catalog.load_catalogs(filename=filename, name='UCERF3-ETAS')\n# Example of functional programming to apply function to stochastic event set\nu3catalogs_filt = list(map(lambda x: x.filter('magnitude > 3.95'), u3catalogs))\nt1 = time.time()\nprint('Loaded {} UCERF3 catalogs in {} seconds.\\n'.format(len(u3catalogs_filt), (t1-t0)))\n\n# Get number of events\nucerf3_numbers = []\nfor u3catalog in u3catalogs_filt:\n ucerf3_numbers.append(u3catalog.get_number_of_events())\n\nt0 = time.time()\nu3catalogs_nf = UCERF3Catalog.load_catalogs(filename=filename_nofaults, name='UCERF3-NoFaultsETAS')\nu3catalogs_nf_filt = list(map(lambda x: x.filter('magnitude > 3.95'), u3catalogs_nf))\nt1 = time.time()\nprint('Loaded {} UCERF3 catalogs in {} seconds.\\n'.format(len(u3catalogs_nf_filt), (t1-t0)))\n\n# Number of events for ucerf3-nofaults, example as list-comprehension instead of for loop\nnofaults_numbers = [x.get_number_of_events() for x in u3catalogs_nf_filt]\n\n# Comcat Synthetics\nepoch_time = 709732655000\nduration_in_years = 1.0\nt0 = time.time()\ncomcat = ComcatCatalog(start_epoch=epoch_time, duration_in_years=1.0, name='Comcat',\n min_magnitude=2.55,\n min_latitude=31.50, max_latitude=43.00,\n min_longitude=-125.40, max_longitude=-113.10,)\n\ncomcat_filt = comcat.filter('magnitude > 3.95')\nt1 = time.time()\nprint(\"Fetched Comcat catalog in {} seconds.\\n\".format(t1-t0))\nprint(\"Downloaded Comcat Catalog with following parameters\")\nprint(\"Start Date: {}\\nEnd Date: {}\".format(str(comcat.start_time), str(comcat.end_time)))\nprint(\"Min Latitude: {} and Max Latitude: {}\".format(comcat.min_latitude, comcat.max_latitude))\nprint(\"Min Longitude: {} and Max Longitude: {}\".format(comcat.min_longitude, comcat.max_longitude))\nprint(\"Min Magnitude: {}\\n\".format(comcat.min_magnitude))\n\ncomcat_count = comcat_filt.get_number_of_events()\n\n\nprint(\"Statements about Catalog Statistics\")\nprint(\"Found {} events in the Comcat catalog.\\n\".format(comcat_count))\n\nprint(\"Found {} events in the UCERF3 catalog with lowest number of events.\".format(numpy.min(ucerf3_numbers)))\nprint(\"Found {} events in the UCERF3 catalog with max number of events.\".format(numpy.max(ucerf3_numbers)))\nprint(\"In UCERF3 the median events were {} and the mean events were {}.\\n\"\n .format(numpy.median(ucerf3_numbers),numpy.mean(ucerf3_numbers)))\n\nprint(\"Found {} events in the UCERF3-NoFaults catalog with lowest number of events.\".format(numpy.min(nofaults_numbers)))\nprint(\"Found {} events in the UCERF3 catalog with max number of events.\".format(numpy.max(nofaults_numbers)))\nprint(\"In UCERF3-Nofaults the median events were {} and the mean events were {}.\\n\"\n .format(numpy.median(nofaults_numbers),numpy.mean(nofaults_numbers)))\n\n# Plotting\n\n# Showing 
Raw plotting with matplotlib\nucerf3_numbers = numpy.array(ucerf3_numbers)\nnofaults_numbers = numpy.array(nofaults_numbers)\nfig = pyplot.figure()\npyplot.hist(ucerf3_numbers, bins=60, color='blue', edgecolor='black', alpha=0.7, label='UCERF3-ETAS')\npyplot.hist(nofaults_numbers, bins=60, color='green', edgecolor='black', alpha=0.7, label='UCERF3-NoFaults')\npyplot.axvline(x=comcat_count, linestyle='--', color='black', label='Comcat')\npyplot.xlabel('Event Count')\npyplot.ylabel('Frequency')\npyplot.xlim([0, numpy.max(numpy.vstack((ucerf3_numbers, nofaults_numbers)))])\npyplot.legend(loc='best')\n\n# Plot cumulative events\nax = plot_cumulative_events_versus_time(u3catalogs_filt, comcat_filt)\nax = plot_cumulative_events_versus_time(u3catalogs_nf_filt, comcat_filt)\n\n# Plot magnitude versus time\nplot_magnitude_versus_time(comcat_filt)\nplot_magnitude_versus_time(u3catalogs_nf_filt[0], show=True)\nplot_magnitude_versus_time(u3catalogs_filt[0], show=True)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"numpy.min",
"numpy.median",
"numpy.max",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.hist",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] |
jarekj71/salbec
|
[
"4aa85bcc5b501d641d78d93794355be2f1a75047"
] |
[
"GUI/surface.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 10 15:55:55 2020\n\n@author: jarekj\n\"\"\"\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import (pyqtSignal)\nimport pickle, os, sys\nimport pandas as pd\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\nfrom soilalbedo import soilModel\nfrom GUI.baseGui import baseGui\n\nclass curvePlot(QtWidgets.QDialog,baseGui):\n def __init__(self,curve=None):\n super().__init__()\n \n if curve is None:\n return\n \n #self.setFixedSize(600,500)\n self.setWindowTitle(\"Fitted curve\")\n figure = Figure(figsize=(8,6))\n canvas = FigureCanvas(figure)\n mainLayout = QtWidgets.QVBoxLayout()\n imageLayout = QtWidgets.QHBoxLayout()\n descLayout = QtWidgets.QHBoxLayout()\n self.curve = curve\n\n #button Layout\n buttonLayout = QtWidgets.QHBoxLayout()\n exportButton = QtWidgets.QPushButton(\"&EXPORT\")\n plotButton = QtWidgets.QPushButton(\"&PLOT\")\n closeButton = QtWidgets.QPushButton(\"&CLOSE\")\n buttonLayout.addStretch()\n buttonLayout.addWidget(exportButton)\n buttonLayout.addWidget(plotButton)\n buttonLayout.addWidget(closeButton)\n\n #image layout\n imageLayout.addWidget(canvas)\n\n #paramslayout\n self.soilParams = curve.get_soil_params()\n descLayout = QtWidgets.QVBoxLayout()\n text = \"\"\n for name,value in self.soilParams:\n if name =='a45':\n value = round(value,6)\n text += \" {}: {}\".format(name,value)\n descLayout.addWidget(QtWidgets.QLabel(text))\n \n self.soilModel = curve.get_model_coefs()\n text = \"\"\n for name,value in zip(['a','b','c','d'],self.soilModel):\n text += \" {}: {}\".format(name,round(value,8))\n descLayout.addWidget(QtWidgets.QLabel(text))\n\n #main layout\n mainLayout.addLayout(buttonLayout)\n mainLayout.addLayout(imageLayout)\n mainLayout.addLayout(descLayout)\n self.setLayout(mainLayout)\n \n plotButton.clicked.connect(self.plotButton_clicked)\n closeButton.clicked.connect(self.reject)\n exportButton.clicked.connect(self.exportButton_clicked)\n\n figure.clear()\n ax = figure.add_subplot(111)\n self.curve.drawFitted(ax)\n canvas.draw()\n \n def plotButton_clicked(self):\n if self.curve is None:\n self._e.warning(\"Nothing to plot\",None)\n return \n filetypes = \"pdf (*pdf);;png (*png);;svg (*svg)\"\n fileName,fileType = QtWidgets.QFileDialog.\\\n getSaveFileName(self,\"File to plot results\", self.outputDir,filetypes)\n if fileName==\"\":\n return\n \n file,ext = os.path.splitext(fileName)\n if ext not in ['.pdf','.png','.svg']:\n fileName = fileName+\".\"+fileType[:3]\n \n fig,ax = matplotlib.pyplot.subplots(1,1,figsize=(8,4),dpi=150)\n self.curve.drawFitted(ax)\n fig.savefig(fileName)\n self.message(\"File {} plotted\".format(os.path.basename(fileName)))\n \n def exportButton_clicked(self):\n curveData = self.curve.exportCurve()\n parameters = self.curve.exportParams()\n \n filetypes = \"Excel (*.xlsx)\"\n fileName,_ = QtWidgets.QFileDialog.\\\n getSaveFileName(self,\"File to save results\", self.outputDir,filetypes) \n \n if fileName==\"\":\n return\n file,ext = os.path.splitext(fileName)\n \n if ext != '.xlsx':\n fileName = fileName+'.xlsx'\n\n writer = pd.ExcelWriter(fileName)\n curveData.to_excel(writer,sheet_name='curve') \n parameters.to_excel(writer,sheet_name='parameters')\n writer.save()\n self.message(\"File {} exported\".format(os.path.basename(fileName)))\n\n\nclass curveFitWidget(QtWidgets.QWidget,baseGui):\n coordSignal = pyqtSignal()\n def 
__init__(self,collections):\n super().__init__()\n\n gridLayout = QtWidgets.QGridLayout()\n mainLayout = QtWidgets.QHBoxLayout()\n self._collections = collections\n self.curve = None\n self._soil = None\n \n #SOIL\n self.soilCombo = QtWidgets.QComboBox()\n self.soilCombo.setMinimumWidth(150)\n self.soilCombo.setModel(self._collections.selectionModel)\n self.soilCombo.currentTextChanged.connect(self.soilCombo_currentTextChanged)\n soilLabel = QtWidgets.QLabel('&Soil')\n soilLabel.setBuddy(self.soilCombo)\n \n gridLayout.addWidget(soilLabel,0,0)\n gridLayout.addWidget(self.soilCombo,1,0)\n \n #PARANS\n self.T3DEdit = QtWidgets.QLineEdit('1.5')\n self.T3DEdit.setMaximumWidth(40)\n label = QtWidgets.QLabel('&T3D (ratio)')\n label.setBuddy(self.T3DEdit)\n gridLayout.addWidget(label,0,1)\n gridLayout.addWidget(self.T3DEdit,0,2) \n \n self.HSDEdit = QtWidgets.QLineEdit('25')\n self.HSDEdit.setMaximumWidth(40)\n label = QtWidgets.QLabel('&HSD (mm)')\n label.setBuddy(self.HSDEdit) \n gridLayout.addWidget(label,1,1)\n gridLayout.addWidget(self.HSDEdit,1,2)\n \n self.T3DEdit.editingFinished.connect(lambda: self.validate_textEdit(1.005,2.5,1.5,\"T3D\"))\n self.HSDEdit.editingFinished.connect(lambda: self.validate_textEdit(0.3,100,25,\"HSD\"))\n \n pltButton = QtWidgets.QPushButton(\"&SHOW\")\n pltButton.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Preferred)\n pltButton.setToolTip(\"Show fitted soil curve\")\n pltButton.clicked.connect(self.plotCurve)\n mainLayout.addLayout(gridLayout)\n mainLayout.addWidget(pltButton)\n \n self.setLayout(mainLayout)\n self.soilCombo_currentTextChanged()\n\n \n def coordinates(self):\n if self._soil is not None:\n return self._soil['coords']\n return None,None\n \n def soilCombo_currentTextChanged(self):\n soilName = self.soilCombo.currentText()\n if soilName =='':\n return\n soilPath = self._collections.getSoilsDatabase().getPath(soilName)\n self._soil = pickle.load(open(soilPath,\"rb\"))\n self.coordSignal.emit()\n self.curve = None\n\n def fitCurve(self):\n try:\n T3D = float(self.T3DEdit.text())\n except ValueError:\n self.T3DEdit.clear()\n return\n \n try:\n HSD = float(self.HSDEdit.text())\n except ValueError:\n self.HSDEdit.clear()\n return\n\n self.curve = soilModel()\n self.curve.fit(self._soil['spectra'],T3D,HSD,self._soil['name'])\n \n def plotCurve(self):\n self.fitCurve()\n curvePlotDialog = curvePlot(self.curve) # dialog class\n curvePlotDialog.show()\n curvePlotDialog.exec_()\n \n def getCurve(self):\n if self.curve is None:\n return None\n return self.curve.get_model_coefs()\n\n def getSoilParams(self):\n return self.curve.get_soil_params() \n\n def refreshSoilCombo(self):\n if not self._collections.isActiveSelection():\n self._collections.setActiveSelection() #all soils\n"
] |
[
[
"matplotlib.figure.Figure",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"pandas.ExcelWriter",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg"
]
] |
Novandev/gn_api
|
[
"08b071ae3916bb7a183d61843a2cd09e9fe15c7b"
] |
[
"recommendation_engines/similar_songs.py"
] |
[
"import sys,os\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nimport pickle\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom config import elastic\n# Based on the data\n\n\n\nclass Similar():\n\n \"\"\"\n This class is used to find silimar songs by emotional score using K-Nearest Neighbors.\n It first checks for the presence of a pretrained model in .sav format to cu down on processing time.\n if one is not found, it pulls data from ElasticSearch to train and fit the model.\n \n \"\"\"\n def __init__(self):\n try: # load both models\n self.similar_songs_model = pickle.load(open('./saved_models/closest_songs_model.sav', 'rb'))\n self.artist_info = pickle.load(open('./saved_models/artist_info.sav', 'rb'))\n except FileNotFoundError: # if either model isnt present then...\n results_dictionary = elastic().search(index='songs',size=100)['hits']['hits']\n data = pd.DataFrame([song['_source']['doc'] for song in results_dictionary])\n self.artist_info = data[['artist','title']]\n emotions = data[['anger','fear','joy','sadness','surprise']]\n similar_songs_model = NearestNeighbors(n_neighbors=5)\n self.similar_songs_model = similar_songs_model.fit(emotions)\n # The following writes the pickle files out\n pickle.dump(self.similar_songs_model, open('./saved_models/closest_songs_model.sav', 'wb'))\n pickle.dump(self.artist_info, open('./saved_models/artist_info.sav', 'wb')) # this needs to be here so artist info can be gather for processing later\n \n def predict(self,emotion_array):\n \"\"\"\n Used for the API endpoint, this returns the artists and songs that are simmilar based on our KNN model\n \n \"\"\"\n _,indecies = self.similar_songs_model.kneighbors([emotion_array])\n return [dict(self.artist_info.iloc[row]) for row in range(len(indecies[0]))] #r returns a dictionart of the artists and songs that match \nif __name__ == \"__main__\":\n test = Similar()\n print(test.predict([0.3,0.4,0.7,0.9,0.7]))"
] |
[
[
"sklearn.neighbors.NearestNeighbors",
"pandas.DataFrame"
]
] |
CorentinJ/WeatherCollector
|
[
"4c7fa3ca17bf2da0b49120ca7c1cbefe7be91a92"
] |
[
"station.py"
] |
[
"from csvtable import CsvTable\r\nfrom csvtable import cache_dir\r\nfrom urllib import request\r\nfrom urllib.error import HTTPError, URLError\r\nfrom datetime import datetime\r\nimport numpy as np\r\nimport os\r\nimport gzip\r\nimport time\r\nfrom shapely.geometry.point import Point\r\n\r\nclass Station:\r\n time_format = \"%Y%m%d\" # YYYYMMDD\r\n \r\n def __init__(self, d):\r\n self.name = d[\"stationname\"]\r\n self.usaf = d[\"usaf\"] # Air Force station ID (this is a string)\r\n self.wban = d[\"wban\"] # NCDC WBAN number (also a string)\r\n self.icao = d[\"icao\"] # ICAO ID\r\n self.country = d[\"ctry\"] # Country\r\n self.state = d[\"state\"] # State for US stations\r\n self.latitude = d[\"lat\"] # Latitude in thousandths of decimal degrees\r\n self.longitude = d[\"lon\"] # Longitude in thousandths of decimal degrees\r\n self.elevation = d[\"elevm\"] # Elevation in meters\r\n \r\n # Start period of record (YYYYMMDD)\r\n self.record_start = datetime.strptime(d[\"begin\"], Station.time_format).date()\r\n # End period of record (YYYYMMDD)\r\n self.record_end = datetime.strptime(d[\"end\"], Station.time_format).date() \r\n\r\n # Courtesy of https://andrew.hedges.name/experiments/haversine/\r\n @staticmethod\r\n def distance(lat1, long1, lat2, long2):\r\n dlat = lat2 - lat1\r\n dlon = long2 - long1\r\n a = np.square(np.sin(np.deg2rad(dlat / 2))) + \\\r\n np.cos(np.deg2rad(lat2)) * \\\r\n np.cos(np.deg2rad(lat1)) * \\\r\n np.square(np.sin(np.deg2rad(dlon / 2)))\r\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))\r\n return c * 6373 # Radius of the earth in kilometers\r\n \r\n def distance_from(self, latitude, longitude):\r\n return Station.distance(self.latitude, self.longitude, latitude, longitude)\r\n\r\n def retrieve_obs(self, year):\r\n # Ensure the year is within this station's recording range\r\n if year < self.record_start.year or year > self.record_end.year:\r\n print(\"Station \" + self.usaf + \" has no observations for %d.\" % year)\r\n return None\r\n\r\n # Find the url and filename\r\n filename = self.usaf + \"-\" + self.wban + \"-\" + str(year) + \".op.gz\"\r\n filepath = os.path.join(cache_dir, filename)\r\n if os.path.exists(filepath):\r\n age = int(time.time() - os.path.getmtime(filepath))\r\n current_year = int(time.strftime(\"%Y\"))\r\n if year == current_year and age > 24 * 3600:\r\n # Re-download observation for the current year if they are over 24 hours old\r\n print(\"Cached file \" + filename + \" is outdated.\")\r\n else:\r\n print(\"File \" + filename + \" found in cache.\")\r\n return Station.parse_gsod_data(filepath)\r\n\r\n # Retrieve the .op file\r\n url = \"https://www1.ncdc.noaa.gov/pub/data/gsod/\" + str(year) + \"/\" + filename\r\n print(\"Downloading \" + filename + \"...\", end=' ')\r\n try:\r\n request.urlretrieve(url, filepath)\r\n except HTTPError as err:\r\n if err.code == 404:\r\n print(\"Failed: does not exist\")\r\n else:\r\n print(\"Failed with HTTP code %d\" % err.code)\r\n return None\r\n except URLError:\r\n print(\"Name could not be resolved, server is likely down (again)\")\r\n raise Exception(\"Gotta wait a bit\")\r\n print(\"Succeeded.\")\r\n\r\n return None if filepath is None else Station.parse_gsod_data(filepath)\r\n\r\n # See ftp://ftp.ncdc.noaa.gov/pub/data/gsod/GSOD_DESC.txt\r\n @staticmethod\r\n def parse_gsod_data(op_filepath):\r\n # Read the archive\r\n gz_reader = gzip.GzipFile(op_filepath, 'rb')\r\n contents = gz_reader.read().decode(\"utf-8\")\r\n gz_reader.close()\r\n \r\n # Parse the data (we have to use the indices here because 
.op files are formatted by \r\n # character alignment and not with separators like .csv files)\r\n dates = []\r\n data = []\r\n for line in contents.split(\"\\n\")[1:]:\r\n if line == \"\": \r\n continue\r\n \r\n dates.append(line[14:22])\r\n datum = {\r\n \"temp\": float(line[24:30]),\r\n \"dewp\": float(line[35:41]),\r\n \"slp\": float(line[46:52]),\r\n \"stp\": float(line[57:63]),\r\n \"visib\": float(line[68:73]),\r\n \"wdsp\": float(line[78:83]),\r\n \"mxspd\": float(line[88:93]),\r\n \"gust\": float(line[95:100]),\r\n \"max\": float(line[102:108]),\r\n \"min\": float(line[110:116]),\r\n \"prcp\": float(line[118:123]),\r\n \"sndp\": float(line[125:130]),\r\n \"fog\": bool(int(line[132])),\r\n \"rain\": bool(int(line[133])),\r\n \"snow\": bool(int(line[134])),\r\n \"hail\": bool(int(line[135])),\r\n \"thunder\": bool(int(line[136])),\r\n \"tornado\": bool(int(line[137])),\r\n }\r\n \r\n # Deal with missing values\r\n for attribute in [\"temp\", \"dewp\", \"slp\", \"stp\", \"max\", \"min\"]:\r\n if datum[attribute] == 9999.9:\r\n datum[attribute] = None\r\n for attribute in [\"visib\", \"wdsp\", \"mxspd\", \"gust\", \"sndp\"]:\r\n if datum[attribute] == 999.9:\r\n datum[attribute] = None\r\n \r\n # Special flag for precipitations\r\n if line[123] == 'I' or datum[\"prcp\"] == 99.99:\r\n datum[\"prcp\"] = None\r\n if not datum[\"rain\"] and datum[\"prcp\"] is None:\r\n datum[\"prcp\"] = 0.0\r\n\r\n # Special flag for the snow\r\n if not datum[\"snow\"] and datum[\"sndp\"] is None:\r\n datum[\"sndp\"] = 0.0\r\n\r\n data.append(datum)\r\n \r\n return dict((date, datum) for (date, datum) in zip(dates, data))\r\n \r\n def get_key(self):\r\n return Station.as_key(self.usaf, self.wban)\r\n\r\n @staticmethod\r\n def as_key(usaf, wban):\r\n return usaf + str(wban)\r\n\r\n def is_valid(self):\r\n return self.usaf and self.wban and self.longitude and self.latitude\r\n\r\n @staticmethod\r\n def get_stations(start_date=None, end_date=None):\r\n # Filter stations that have no observation within the time range\r\n stations = list(station_table.values())\r\n \r\n if start_date is not None:\r\n stations = [station for station in stations if station.record_end > start_date]\r\n if end_date is not None:\r\n stations = [station for station in stations if station.record_start < end_date]\r\n \r\n return stations\r\n \r\n @staticmethod\r\n def find_closest_stations(latitude, longitude, max_dist=None, start_date=None, end_date=None):\r\n stations = Station.get_stations(start_date, end_date)\r\n \r\n # Evaluate the distance with all stations\r\n distances = np.array([station.distance_from(latitude, longitude) for \r\n station in stations])\r\n\r\n # Sort stations based on their distance\r\n closest = list(zip(stations, distances))\r\n closest.sort(key=lambda x: x[1])\r\n\r\n # Remove stations that are too far away\r\n if max_dist is not None:\r\n closest = closest[:np.sum(distances <= max_dist)]\r\n\r\n # Return the sorted stations and distances\r\n return closest\r\n \r\n @staticmethod\r\n def find_stations_in_geometry(shape, contour_dist=0, start_date=None, end_date=None):\r\n stations = Station.get_stations(start_date, end_date)\r\n \r\n # For performance purposes, find a cutoff distance beyond which stations are ignored\r\n center = shape.centroid\r\n hull_points = [Point(x, y) for x, y in zip(*shape.convex_hull.exterior.xy)]\r\n furthest_point = max(hull_points, key=lambda x: center.distance(x))\r\n max_dist = Station.distance(center.y, center.x, furthest_point.y, furthest_point.x)\r\n max_dist += 
contour_dist\r\n\r\n # Evaluate the distance with all stations\r\n shapes = shape if shape.geom_type == 'MultiPolygon' else [shape]\r\n distances = []\r\n for station in stations:\r\n # First get an approximate distance from the centroid\r\n distance_approx = station.distance_from(center.y, center.x)\r\n if distance_approx > max_dist:\r\n distances.append(None)\r\n continue\r\n \r\n # Points inside the borders have a distance of 0\r\n station_point = Point(station.longitude, station.latitude)\r\n if any(sub_shape.contains(station_point) for sub_shape in shapes):\r\n distances.append(0)\r\n continue\r\n \r\n # Otherwise, evaluate the real distance from the region borders\r\n distance = 99999\r\n for sub_shape in shapes:\r\n exterior = sub_shape.exterior\r\n projection = exterior.interpolate(exterior.project(station_point))\r\n distance = min(distance, station.distance_from(projection.y, projection.x))\r\n distances.append(distance if distance < contour_dist else None)\r\n distances = np.array(distances)\r\n\r\n # Sort stations based on their distance\r\n closest = [(station, distance) for station, distance in zip(stations, distances) if\r\n distance is not None]\r\n closest.sort(key=lambda x: x[1])\r\n\r\n # Return the sorted stations and distances\r\n return closest\r\n\r\n\r\n# See ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.txt\r\nstation_table = CsvTable(\"ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.csv\",\r\n format=[str] * 6 + [float] * 3 + [str] * 2,\r\n entry_type=Station,\r\n key=Station.get_key)\r\n\r\n"
] |
[
[
"numpy.deg2rad",
"numpy.array",
"numpy.sum",
"numpy.sqrt"
]
] |
carlospatinos/realtime-vad
|
[
"abfe1e65bfdbb392543c5c15801457144795ff4a"
] |
[
"plotInput.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"Plot the live microphone signal(s) with matplotlib.\n\nMatplotlib and NumPy have to be installed.\n\n\"\"\"\nimport argparse\nimport queue\nimport sys\n\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sounddevice as sd\n\n\ndef int_or_str(text):\n \"\"\"Helper function for argument parsing.\"\"\"\n try:\n return int(text)\n except ValueError:\n return text\n\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\n '-l', '--list-devices', action='store_true',\n help='show list of audio devices and exit')\nargs, remaining = parser.parse_known_args()\nif args.list_devices:\n print(sd.query_devices())\n parser.exit(0)\nparser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[parser])\nparser.add_argument(\n 'channels', type=int, default=[1], nargs='*', metavar='CHANNEL',\n help='input channels to plot (default: the first)')\nparser.add_argument(\n '-d', '--device', type=int_or_str,\n help='input device (numeric ID or substring)')\nparser.add_argument(\n '-w', '--window', type=float, default=200, metavar='DURATION',\n help='visible time slot (default: %(default)s ms)')\nparser.add_argument(\n '-i', '--interval', type=float, default=30,\n help='minimum time between plot updates (default: %(default)s ms)')\nparser.add_argument(\n '-b', '--blocksize', type=int, help='block size (in samples)')\nparser.add_argument(\n '-r', '--samplerate', type=float, help='sampling rate of audio device')\nparser.add_argument(\n '-n', '--downsample', type=int, default=10, metavar='N',\n help='display every Nth sample (default: %(default)s)')\nargs = parser.parse_args(remaining)\nif any(c < 1 for c in args.channels):\n parser.error('argument CHANNEL: must be >= 1')\nmapping = [c - 1 for c in args.channels] # Channel numbers start with 1\nq = queue.Queue()\n\n\ndef audio_callback(indata, frames, time, status):\n \"\"\"This is called (from a separate thread) for each audio block.\"\"\"\n if status:\n print(status)\n # Fancy indexing with mapping creates a (necessary!) 
copy:\n q.put(indata[::args.downsample, mapping])\n\n\ndef update_plot(frame):\n \"\"\"This is called by matplotlib for each plot update.\n\n Typically, audio callbacks happen more frequently than plot updates,\n therefore the queue tends to contain multiple blocks of audio data.\n\n \"\"\"\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines\n\n\ntry:\n if args.samplerate is None:\n device_info = sd.query_devices(args.device, 'input')\n args.samplerate = device_info['default_samplerate']\n\n length = int(args.window * args.samplerate / (1000 * args.downsample))\n plotdata = np.zeros((length, len(args.channels)))\n\n fig, ax = plt.subplots()\n lines = ax.plot(plotdata)\n if len(args.channels) > 1:\n ax.legend(['channel {}'.format(c) for c in args.channels],\n loc='lower left', ncol=len(args.channels))\n ax.axis((0, len(plotdata), -1, 1))\n ax.set_yticks([0])\n ax.yaxis.grid(True)\n ax.tick_params(bottom=False, top=False, labelbottom=False,\n right=False, left=False, labelleft=False)\n fig.tight_layout(pad=0)\n\n stream = sd.InputStream(\n device=args.device, channels=max(args.channels),\n samplerate=args.samplerate, callback=audio_callback)\n ani = FuncAnimation(fig, update_plot, interval=args.interval, blit=True)\n with stream:\n plt.show()\nexcept Exception as e:\n parser.exit(type(e).__name__ + ': ' + str(e))\n"
] |
[
[
"numpy.roll",
"matplotlib.pyplot.show",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.subplots"
]
] |
fsxfreak/yarnnlm
|
[
"de117fc58337e206f3d2ca2d60a76d0d2a8078fb"
] |
[
"src/find_rare_subs.py"
] |
[
"'''\nOutputs likely sentences with rare words substituted into the sentence.\n'''\nimport logging as log\nimport os, sys\n\nimport tensorflow as tf\nimport numpy as np\n\nimport reader, util\nfrom flags import * # for FLAGS\nfrom model import RNNLM\n\nlog.basicConfig(stream=sys.stderr, level=log.INFO,\n format='%(asctime)s [%(levelname)s]:%(message)s', \n datefmt='%Y-%m-%d %H:%M:%S') \n\nflags.DEFINE_string('rare_src', '../data/rare.txt',\n 'List of words deemed as rare in the parallel training data.')\nflags.DEFINE_string('train_src', '../data/train.src',\n 'Original training data source.')\nflags.DEFINE_string('out_src', '../data/train.src.fwd.rare-aug',\n 'Source sentences with a proposed rare augmentation.')\n\ndef main(_):\n # train_data and dev_data should be unused. using this for the vocab\n train_data, dev_data, tok_id, id_tok = reader.get_train_data(\n FLAGS.train_data, FLAGS.dev_data, FLAGS.vocab_data, FLAGS.vocab_size)\n\n train_src, _, _ = reader.prepare_data(FLAGS.train_src, \n FLAGS.vocab_data, FLAGS.vocab_size)\n rare_src, _, _ = reader.prepare_data(FLAGS.rare_src,\n FLAGS.vocab_data, FLAGS.vocab_size)\n rare_src = reader.squash_data(rare_src)\n\n log.debug('train_src: %s' % train_src[:3])\n log.debug('rare_src: %s' % rare_src[:3])\n\n model = RNNLM(FLAGS, train_data, dev_data, id_tok)\n index_subs = model.find_rare_subs(train_src, rare_src)\n\n log.debug('Got %d substitutions.' % len(index_subs))\n log.info('Writing results to %s.' % FLAGS.out_src)\n with open(FLAGS.out_src, 'w') as f:\n for index, sub in subs:\n humanized = util.convert_id_tok([ sub ], id_tok)\n f.write('%d\\t%s\\n' % (index, humanized))\n\nif __name__ == '__main__':\n if FLAGS.output_mode == 'debug' or FLAGS.output_mode == 'verbose':\n log.getLogger().setLevel(log.DEBUG)\n elif FLAGS.output_mode == 'info':\n log.getLogger().setLevel(log.INFO)\n tf.app.run()\n"
] |
[
[
"tensorflow.app.run"
]
] |
guanlnny/HDRmaster
|
[
"0680695841659ea3aff09dab229d1738e742a0b9"
] |
[
"HDRmaster-cpu/vgg.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torchvision import models\n\n\nclass Vgg19(torch.nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg19, self).__init__()\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(2):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(2, 7):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(7, 12):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(12, 21):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(21, 30):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h_relu1 = self.slice1(X)\n h_relu2 = self.slice2(h_relu1)\n h_relu3 = self.slice3(h_relu2)\n h_relu4 = self.slice4(h_relu3)\n h_relu5 = self.slice5(h_relu4)\n out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]\n return out\n\n\nclass VGGLoss(nn.Module):\n def __init__(self):\n super(VGGLoss, self).__init__()\n self.vgg = Vgg19().cpu()\n self.criterion = nn.L1Loss()\n self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]\n\n def forward(self, x, y):\n x_vgg, y_vgg = self.vgg(x), self.vgg(y)\n loss = 0\n for i in range(len(x_vgg)):\n loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())\n return loss\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.L1Loss"
]
] |
annacuomo/TenK10K_analyses_HPC
|
[
"2023a3e36d06cf66bc114e6b4d00a2e3345fbc3b"
] |
[
"scripts/run_association_Tcells_one_gene.py"
] |
[
"import os\nimport sys\nimport scanpy as sc\nimport pandas as pd\nimport xarray as xr\nfrom numpy import ones\nfrom pandas_plink import read_plink1_bin\nfrom numpy.linalg import cholesky\nimport time\nfrom limix.qc import quantile_gaussianize\n\nfrom cellregmap import run_association_fast\n\n\narg = {}\n\n# chrom\narg[\"chrom\"] = str(sys.argv[1])\n\n# gene index\narg[\"i\"] = int(sys.argv[2])\n\nmydir = \"/share/ScratchGeneral/anncuo/OneK1K/\"\ninput_files_dir = mydir+\"input_files_CellRegMap/\"\n\n\n######################################\n###### sample mapping file (SMF) #####\n######################################\n\n## this file will map cells to donors \n## in this case, it is limited to T cells only \nsample_mapping_file = input_files_dir+\"smf_Tcells.csv\"\nsample_mapping = pd.read_csv(sample_mapping_file, dtype={\"individual_long\": str, \"genotype_individual_id\": str, \"phenotype_sample_id\": str}, index_col=0)\n\n## extract unique individuals\ndonors0 = sample_mapping[\"genotype_individual_id\"].unique()\ndonors0.sort()\nprint(\"Number of unique donors: {}\".format(len(donors0)))\n\n######################################\n########### phenotype file ###########\n######################################\n\n# open anndata \nmy_file = mydir + \"expression_objects/sce\"+str(arg[\"chrom\"])+\".h5ad\"\nadata = sc.read(my_file)\n# sparse to dense\nmat = adata.raw.X.todense()\n# make pandas dataframe\nmat_df = pd.DataFrame(data=mat.T, index=adata.raw.var.index, columns=adata.obs.index)\n# turn into xr array\nphenotype = xr.DataArray(mat_df.values, dims=[\"trait\", \"cell\"], coords={\"trait\": mat_df.index.values, \"cell\": mat_df.columns.values})\nphenotype = phenotype.sel(cell=sample_mapping[\"phenotype_sample_id\"].values)\n\ndel mat\ndel mat_df\n\ngenes = phenotype.trait.values\n\n##########################################\n###### check if file already exists ######\n##########################################\n\n# this definitely should be higher up at the very least, or this could be inserted into some sort of pipeline to take care of it?\ngene_name = genes[arg[\"i\"]]\n\nfolder = mydir + \"CRM_association/all_Tcells/\"\noutfilename = f\"{folder}{gene_name}.tsv\"\nprint(outfilename)\n\nif os.path.exists(outfilename):\n print(\"File already exists, exiting\")\n sys.exit()\n\n######################################\n############ kinship file ############\n######################################\n\n## read in GRM (genotype relationship matrix; kinship matrix)\nkinship_file = input_files_dir+\"grm_wide.csv\"\nK = pd.read_csv(kinship_file, index_col=0)\nK.index = K.index.astype('str')\nassert all(K.columns == K.index) #symmetric matrix, donors x donors\n\nK = xr.DataArray(K.values, dims=[\"sample_0\", \"sample_1\"], coords={\"sample_0\": K.columns, \"sample_1\": K.index})\nK = K.sortby(\"sample_0\").sortby(\"sample_1\")\ndonors = sorted(set(list(K.sample_0.values)).intersection(donors0))\nprint(\"Number of donors after kinship intersection: {}\".format(len(donors)))\n\n## subset to relevant donors\nK = K.sel(sample_0=donors, sample_1=donors)\nassert all(K.sample_0 == donors)\nassert all(K.sample_1 == donors)\n\n## and decompose such as K = hK @ hK.T (using Cholesky decomposition)\nhK = cholesky(K.values)\nhK = xr.DataArray(hK, dims=[\"sample\", \"col\"], coords={\"sample\": K.sample_0.values})\nassert all(hK.sample.values == K.sample_0.values)\n\ndel K\nprint(\"Sample mapping number of rows BEFORE intersection: {}\".format(sample_mapping.shape[0]))\n## subsample sample mapping file to 
donors in the kinship matrix\nsample_mapping = sample_mapping[sample_mapping[\"genotype_individual_id\"].isin(donors)]\nprint(\"Sample mapping number of rows AFTER intersection: {}\".format(sample_mapping.shape[0]))\n\n## use sel from xarray to expand hK (using the sample mapping file)\nhK_expanded = hK.sel(sample=sample_mapping[\"genotype_individual_id\"].values)\nassert all(hK_expanded.sample.values == sample_mapping[\"genotype_individual_id\"].values)\n\n######################################\n############ genotype file ###########\n######################################\n\n## read in genotype file (plink format)\nplink_folder = mydir + \"plink_files/\"\nplink_file = plink_folder+\"plink_chr\"+str(arg[\"chrom\"])+\".bed\"\nG = read_plink1_bin(plink_file)\n\ndef cis_snp_selection(feature_id, annotation_df, G, window_size):\n feature = annotation_df.query(\"gene_name==\\\"{}\\\"\".format(feature_id)).squeeze()\n chrom = str(feature['seqid'])\n start = feature['start']\n end = feature['end']\n # make robust to features self-specified back-to-front\n lowest = min([start,end])\n highest = max([start,end])\n # for cis, we sequentially add snps that fall within each region\n G = G.where((G.chrom == str(chrom)) & (G.pos > (lowest-window_size)) & (G.pos < (highest+window_size)), drop=True)\n return G\n\n\n# gene annotation file linking gene to genomic position\nannotation_file = mydir+\"GeneLocations.tsv\"\nanno_df = pd.read_csv(annotation_file, sep=\"\\t\", index_col=0)\n\n# window size (cis)\nw = 100000\n\nG_sel = cis_snp_selection(gene_name, anno_df, G, w)\n\n# expand out genotypes from cells to donors (and select relevant donors in the same step)\nG_expanded = G_sel.sel(sample=sample_mapping[\"individual_long\"].values)\n#assert all(hK_expanded.sample.values == G_expanded.sample.values)\n\ndel G\n\n######################################\n############ context file ############\n######################################\n\n# cells by PCs (10)\nC_file = input_files_dir+\"PCs.csv.pkl\"\nC = pd.read_pickle(C_file)\nC = xr.DataArray(C.values, dims=[\"cell\", \"pc\"], coords={\"cell\": C.index.values, \"pc\": C.columns.values})\nC = C.sel(cell=sample_mapping[\"phenotype_sample_id\"].values)\nassert all(C.cell.values == sample_mapping[\"phenotype_sample_id\"].values)\n\n# C_gauss = quantile_gaussianize(C)\n\n######################################\n########### Prepare model ############\n######################################\n\nn_cells = phenotype.shape[1]\nW = ones((n_cells, 1)) # just intercept as covariates\n\n# select gene\ny = phenotype.sel(trait=gene_name)\n\n\n\ny = quantile_gaussianize(y)\ny = y.values.reshape(y.shape[0],1)\n\ndel phenotype\n\nGG = G_expanded.values\n\ndel G_sel\n\nprint(\"Running for gene {}\".format(gene_name))\n\npvals = run_association_fast(y, W, C.values[:,0:10], G=GG, hK=hK_expanded)[0]\n\npv = pd.DataFrame({\"chrom\":G_expanded.chrom.values,\n \"pv\":pvals,\n \"variant\":G_expanded.snp.values})\npv.to_csv(outfilename)\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.ones",
"numpy.linalg.cholesky",
"pandas.read_pickle"
]
] |
Amarify/DXC-Industrialized-AI-Starter
|
[
"87e050e6871bfcd86f37c90ec597f035cb270744"
] |
[
"dxc/ai/clean_data/clean_data.py"
] |
[
"import pandas as pd\nimport janitor #data cleaning\nfrom ftfy import fix_text #data cleaning\nimport nltk #data cleaning\nnltk.download('punkt') #data cleaning\nimport scrubadub #data cleaning\nimport arrow #normalizing dates\nimport numpy as np\nfrom sklearn.base import TransformerMixin\nfrom sklearn.impute import KNNImputer ##using KNN as imputer for categorical fields\nfrom sklearn.preprocessing import OrdinalEncoder ##Ordinal encoder is being used for encoding categorical objects\nfrom dxc.ai.global_variables import globals_file\n\n\ndef encode(data):\n\n '''function to encode non-null data and replace it in the original data'''\n encoder = OrdinalEncoder()\n #retains only non-null values\n nonulls = np.array(data.dropna())\n #reshapes the data for encoding\n impute_reshape = nonulls.reshape(-1,1)\n #encode date\n impute_ordinal = encoder.fit_transform(impute_reshape)\n #encoders_store[column_name]=encoder\n #Assign back encoded values to non-null values\n data.loc[data.notnull()] = np.squeeze(impute_ordinal)\n return (data,encoder)\n\ndef impute_df(df):\n # imputer = KNN()\n imputer = KNNImputer(n_neighbors=2)\n object_types = list(df.select_dtypes(include=['object']).columns)\n num_types = list(set(df.columns) - set(object_types))\n encoders_store={}\n for column in num_types:\n skew=df[column].skew()\n if (-1 < skew < 1):\n df[column]=df[column].fillna(df[column].mean())\n else :\n df[column]=df[column].fillna(df[column].median())\n #create a for loop to iterate through each column in the data\n for columns in object_types:\n new=encode(df[columns])\n encoders_store[columns]=new[1]\n imputed_data = pd.DataFrame(np.round(imputer.fit_transform(df)),columns = df.columns)\n for columns in object_types:\n imputed_data[columns]=encoders_store[columns].inverse_transform(np.array(imputed_data[columns]).reshape(-1,1))\n return imputed_data\n\n#CLEANING FILE\ndef clean_dataframe(df, impute = False, text_fields = [], date_fields = [], numeric_fields = [], categorical_fields = []):\n clean_df = (\n df\n #make the column names lower case and remove spaces\n .clean_names()\n\n #remove empty columns\n .remove_empty()\n\n #remove empty rows and columns\n .dropna(how='all')\n )\n\n #remove harmful characters. remove personal identifiers. make lowercase\n for field in text_fields:\n field = '_'.join(field.split()).lower()\n clean_df[field] = clean_df[field].fillna(' ').apply(fix_text)\n clean_df[field] = clean_df[field].apply(scrubadub.clean, replace_with='identifier')\n clean_df[field] = clean_df[field].str.lower()\n\n #impute missing values\n if impute:\n clean_df = impute_df(clean_df)\n\n #standardize the format of all date fields\n for field in date_fields:\n field = '_'.join(field.split()).lower()\n clean_df[field] = clean_df[field].apply(arrow.get)\n\n #make sure all numeric fields have the proper data type\n for field in numeric_fields:\n field = '_'.join(field.split()).lower()\n clean_df[field] = pd.to_numeric(clean_df[field])\n\n #make sure all categorical variables have the proper data type\n for field in categorical_fields:\n field = '_'.join(field.split()).lower()\n clean_df[field] = clean_df[field].astype('category')\n\n clean_df=clean_df.clean_names()\n\n globals_file.clean_data_used = True\n\n return(clean_df)\n"
] |
[
[
"numpy.squeeze",
"sklearn.impute.KNNImputer",
"sklearn.preprocessing.OrdinalEncoder",
"numpy.array",
"pandas.to_numeric"
]
] |
louieworth/Pytorch_RL
|
[
"9313c987a5e5d5727a68ffd64225426bd986f6bd"
] |
[
"Policy/AC/main_AC.py"
] |
[
"import numpy as np\nimport gym\nfrom AC import Agent\nfrom utils import plotLearning\nimport matplotlib.pyplot as plt\nfrom torch.utils.tensorboard import SummaryWriter\n\nwriter = SummaryWriter()\n\nif __name__ == \"__main__\":\n agent = Agent(alpha=5e-5, beta=1e-5, input_dim=[2], gamma=0.99,\n layer1_size=256, layer2_size=256)\n\n env = gym.make('MountainCarContinuous-v0')\n scores = []\n num_episodes = 10\n for i in range(num_episodes):\n done = False\n score = 0\n observation = env.reset()\n while not done:\n action = np.array(agent.choose_action(observation)).reshape((1, ))\n observation_, reward, done, info = env.step(action)\n agent.learn(observation, reward, observation_, done)\n observation = observation_\n score += reward\n scores.append(score)\n print('Episode: {}, | Score: {:.2f}'.format(i, score))\n writer.add_scalar('Accuarcy/loss', score, i)\n\n # filename = 'Mountaincar-continous.png'\n # plotLearning(scores, filename, window=20)\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.utils.tensorboard.SummaryWriter"
]
] |
VasilyevEvgeny/self-focusing_3D
|
[
"c90b4d78d2d72365566f8a49b325bd48127b1e44",
"c90b4d78d2d72365566f8a49b325bd48127b1e44"
] |
[
"core/noise.py",
"core/kerr_effect.py"
] |
[
"from abc import ABCMeta, abstractmethod\nfrom pyfftw.builders import ifft2\nfrom numpy import sqrt, pi, exp, zeros, float64, complex64, correlate, var\nfrom numpy import random, mean\nfrom numpy.fft import fftshift\nfrom numba import jit\n\n\nclass ComplexNoise(metaclass=ABCMeta):\n \"\"\"\n Abstract class for complex noise object.\n It is assumed that in the problem of propagation of a 3-dimensional beam in coordinates of x and y\n a complex multiplicative noise can be introduced with a certain percentage contribution.\n\n In general, such characteristics as the variance and correlation radius should be calculated.\n Аrrays are created for storing the most complex noise, as well as for its real and imaginary parts. In addition,\n autocorrelation functions are calculated along the coordinates of x and y for the real and imaginary parts\n of the noise.\n \"\"\"\n\n def __init__(self, **kwargs):\n self._variance_expected = kwargs.get('variance', 1) # variance\n\n self._r_corr_in_meters = kwargs['r_corr_in_meters'] # correlation radius\n self._r_corr_in_points = None\n\n self._noise_field = None # array for complex noise\n self._noise_field_real = None # array for the real part of complex noise\n self._noise_field_imag = None # array for the imaginary part of complex noise\n\n self._autocorr_real_x = None # autocorr function of real part of complex noise when averaged along x (len=n_y)\n self._autocorr_real_y = None # autocorr function of real part of complex noise when averaged along y (len=n_x)\n self._autocorr_imag_x = None # autocorr function of real part of complex noise when averaged along x (len=n_y)\n self._autocorr_imag_y = None # autocorr function of real part of complex noise when averaged along y (len=n_x)\n\n # spatial grid parameters\n self._n_x, self._n_y, self._dx, self._dy = None, None, None, None\n\n @property\n def variance_expected(self):\n return self._variance_expected\n\n @property\n def variance_real(self):\n return var(self._noise_field_real)\n\n @property\n def variance_imag(self):\n return var(self._noise_field_imag)\n\n @property\n def r_corr_in_meters(self):\n return self._r_corr_in_meters\n\n @property\n def noise_field(self):\n return self._noise_field\n\n @property\n def autocorrs(self):\n return self._autocorr_real_x, self._autocorr_real_y, self._autocorr_imag_x, self._autocorr_imag_y\n\n def initialize(self, **params):\n \"\"\"Initialization of grid parameters and correlation radius in points\"\"\"\n\n self._n_x = params['n_x']\n self._n_y = params['n_y']\n self._dx = params['dx']\n self._dy = params['dy']\n\n self._r_corr_in_points = self._r_corr_in_meters // max(self._dx, self._dy)\n\n @staticmethod\n @jit(nopython=True)\n def _initialize_noise_arrays(noise_field, n_x, n_y):\n \"\"\"\n Initialization of real and imaginary parts of complex noise in corresponding arrays\n \"\"\"\n\n real_part = zeros(shape=(n_x, n_y), dtype=float64)\n imag_part = zeros(shape=(n_x, n_y), dtype=float64)\n\n for i in range(n_x):\n for j in range(n_y):\n real_part[i, j] = noise_field[i, j].real\n imag_part[i, j] = noise_field[i, j].imag\n\n return real_part, imag_part\n\n @abstractmethod\n def process(self):\n \"\"\"Noise and autocorr functions generation\"\"\"\n\n @staticmethod\n def __calculate_autocorr(noise, n_iter, n, autocorr_type):\n \"\"\"\n Calculation of autocorrelation function\n\n :param noise: noise array (real or imaginary part of complex noise)\n :param n_iter: the number of spatial layers over which the autocorr function is averaged\n for x-functions it is n_x, 
for y-functions it is n_y\n :param n: size of autocorr array\n :param autocorr_type: axis, along which autocorrelation function is averaged\n\n :return: averaged along one axis autocorrelation function\n \"\"\"\n\n autocorr = zeros(shape=(n,), dtype=float64)\n for i in range(n_iter):\n if autocorr_type == 'x':\n autocorr += correlate(noise[i, :], noise[i, :], mode='same')\n elif autocorr_type == 'y':\n autocorr += correlate(noise[:, i], noise[:, i], mode='same')\n else:\n raise Exception('Wrong type!')\n autocorr /= n_iter\n\n return autocorr\n\n def _calculate_autocorrelations(self):\n \"\"\"Calculation of all 4 autocorrelation functions\"\"\"\n\n self._autocorr_real_x = self.__calculate_autocorr(self._noise_field_real, self._n_x, self._n_y, 'x')\n self._autocorr_real_y = self.__calculate_autocorr(self._noise_field_real, self._n_y, self._n_x, 'y')\n self._autocorr_imag_x = self.__calculate_autocorr(self._noise_field_imag, self._n_x, self._n_y, 'x')\n self._autocorr_imag_y = self.__calculate_autocorr(self._noise_field_imag, self._n_y, self._n_x, 'y')\n\n @staticmethod\n def __find_r_corr_in_points(arr):\n \"\"\"\n Calculation of correlation radius in points by level exp(-1.0) of function in arr with max value is\n situated in the center.\n\n :param arr: array, whose correlation radius needs to be found\n\n :return: correlation radius in points\n \"\"\"\n n = len(arr)\n th = arr[n // 2] * exp(-1.0)\n for i in range(n // 2, n, 1):\n if arr[i] < th:\n return i - n // 2\n\n def calculate_r_corr(self):\n \"\"\"Calculates correlation radius for all 4 autocorrelation functions\"\"\"\n\n r_corr_real_x = self._dx * self.__find_r_corr_in_points(self._autocorr_real_x)\n r_corr_real_y = self._dy * self.__find_r_corr_in_points(self._autocorr_real_y)\n r_corr_imag_x = self._dx * self.__find_r_corr_in_points(self._autocorr_imag_x)\n r_corr_imag_y = self._dy * self.__find_r_corr_in_points(self._autocorr_imag_y)\n\n # Returns mean of calculated correlation radii\n return mean([r_corr_real_x, r_corr_real_y, r_corr_imag_x, r_corr_imag_y])\n\n\nclass GaussianNoise(ComplexNoise):\n \"\"\"\n Subclass for complex Gaussian noise.\n Gaussian noise with a given variance and correlation radius is generated by the spectral method:\n 1. separately for the real and imaginary noise parts a uniform distribution is generated\n in the range -\\sqrt{3} to +\\sqrt{3}\n 2. generated distributions are multiplied by a Gaussian envelope with a pre-calculated radius depending on\n the variance and correlation radius\n 3. 
an inverse Fourier transform is done\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @staticmethod\n @jit(nopython=True)\n def __generate_protoarray(n_x, n_y, variance, r_corr_in_points):\n \"\"\"Generation of proto array with uniform distribution multiplied by Gaussian envelope\"\"\"\n\n proto = zeros(shape=(n_x, n_y), dtype=complex64)\n\n scale = r_corr_in_points / max(n_x, n_y)\n cf = scale * sqrt(pi * variance)\n d = 0.5 * (pi * scale) ** 2\n amplitude = sqrt(12)\n\n for i in range(n_x):\n for j in range(n_y):\n a, b = amplitude * (random.random()-0.5), amplitude * (random.random()-0.5)\n gauss = cf * exp(-d * ((i-n_x//2) ** 2 + (j-n_y//2) ** 2))\n proto[i, j] = a * gauss + 1j * b * gauss\n\n return proto\n\n @staticmethod\n @jit(nopython=True)\n def __normalize_after_fft(arr):\n \"\"\"Normalization of array after inverse Fourier transform\"\"\"\n\n n1, n2 = arr.shape[0], arr.shape[1]\n for i in range(n1):\n for j in range(n2):\n arr[i, j] *= n1 * n2\n\n return arr\n\n def process(self):\n \"\"\"Noise and autocorr functions generation\"\"\"\n\n # noise field generation\n proto = self.__generate_protoarray(self._n_x, self._n_y, self._variance_expected, self._r_corr_in_points)\n proto_shifted = fftshift(proto, axes=(0, 1))\n proto_fft_obj = ifft2(proto_shifted)\n proto_fft_normalized = self.__normalize_after_fft(proto_fft_obj())\n self._noise_field = proto_fft_normalized\n\n # initialization of arrays for real and imaginary parts\n self._noise_field_real, self._noise_field_imag = \\\n self._initialize_noise_arrays(self._noise_field, self._n_x, self._n_y)\n\n # autocorrelation functions calculation\n self._calculate_autocorrelations()\n",
"from abc import ABCMeta, abstractmethod\nfrom numba import jit\nfrom numpy import exp, multiply\n\n\nclass KerrExecutor(metaclass=ABCMeta):\n \"\"\"\n Abstract class for Kerr effect object.\n The class takes on the input in the constructor a beam object, which contains all the necessary beam parameters\n for further calculations.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.__beam = kwargs['beam']\n self.__nonlin_phase_const = -0.5j * self.__beam.r_kerr / self.__beam.z_diff # nonlinear Kerr phase shift const\n\n @abstractmethod\n def info(self):\n \"\"\"KerrExecutor type\"\"\"\n\n @staticmethod\n @jit(nopython=True)\n def phase_increment(field, intensity, current_nonlin_phase):\n \"\"\"\n :param field: array for complex light field\n :param intensity: array for float intensity of the field\n :param current_nonlin_phase: current nonlinear phase shift\n\n :return: field with nonlinear incremented phase shift\n \"\"\"\n return multiply(field, exp(current_nonlin_phase * intensity))\n\n def process_kerr_effect(self, dz):\n \"\"\"\n :param dz: current step along evolutionary coordinate z\n\n :return: None\n \"\"\"\n self.__beam._field = self.phase_increment(self.__beam._field, self.__beam.intensity,\n self.__nonlin_phase_const * dz)\n\n\nclass KerrExecutorX(KerrExecutor):\n \"\"\"\n Class for modeling the Kerr effect to which a 2-dimensional beam is exposed\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @property\n def info(self):\n return 'kerr_executor_x'\n\n\nclass KerrExecutorR(KerrExecutor):\n \"\"\"\n Class for modeling the Kerr effect to which a 3-dimensional beam in axisymmetric approximation is exposed\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @property\n def info(self):\n return 'kerr_executor_r'\n\n\nclass KerrExecutorXY(KerrExecutor):\n \"\"\"\n Class for modeling the Kerr effect to which a 3-dimensional beam is exposed\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @property\n def info(self):\n return 'kerr_executor_xy'\n"
] |
[
[
"numpy.random.random",
"numpy.sqrt",
"numpy.fft.fftshift",
"numpy.correlate",
"numpy.mean",
"numpy.var",
"numpy.exp",
"numpy.zeros"
],
[
"numpy.exp"
]
] |
vmarchesin/ml-playground
|
[
"d9ee805f0fc32bcff244e65aadfd344e24fae8d7",
"d9ee805f0fc32bcff244e65aadfd344e24fae8d7"
] |
[
"google-machine-learning-crash-course/py/2_tensorflow_example.py",
"google-machine-learning-crash-course/py/1_tensorflow_example.py"
] |
[
"from __future__ import print_function\n\nimport math\nimport os\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\nfrom utils.input_fn import my_input_fn\n\ndirname = os.path.dirname(__file__)\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\ncsv = os.path.join(dirname, '../datasets/california_housing_train.csv')\ncalifornia_housing_dataframe = pd.read_csv(csv, sep=\",\")\n\n# Randomizing the DataFrame\ncalifornia_housing_dataframe = california_housing_dataframe.reindex(\n np.random.permutation(california_housing_dataframe.index)\n)\ncalifornia_housing_dataframe[\"median_house_value\"] /= 1000.0\n\ndef train_model(learning_rate, steps, batch_size, input_feature=\"total_rooms\"):\n \"\"\"Trains a linear regression model of one feature.\n\n Args:\n learning_rate: A `float`, the learning rate.\n steps: A non-zero `int`, the total number of training steps. A training step\n consists of a forward and backward pass using a single batch.\n batch_size: A non-zero `int`, the batch size.\n input_feature: A `string` specifying a column from `california_housing_dataframe`\n to use as input feature.\n \"\"\"\n\n periods = 10\n steps_per_period = steps / periods\n\n my_feature = input_feature\n my_feature_data = california_housing_dataframe[[my_feature]]\n my_label = \"median_house_value\"\n targets = california_housing_dataframe[my_label]\n\n # Create feature columns.\n feature_columns = [tf.feature_column.numeric_column(my_feature)]\n\n # Create input functions.\n training_input_fn = lambda:my_input_fn(my_feature_data, targets, batch_size=batch_size)\n prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)\n\n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=my_optimizer\n )\n\n # Set up to plot the state of our model's line each period.\n plt.figure(figsize=(15, 6))\n plt.subplot(1, 2, 1)\n plt.title(\"Learned Line by Period\")\n plt.ylabel(my_label)\n plt.xlabel(my_feature)\n sample = california_housing_dataframe.sample(n=300)\n plt.scatter(sample[my_feature], sample[my_label])\n colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n root_mean_squared_errors = []\n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period\n )\n # Take a break and compute predictions.\n predictions = linear_regressor.predict(input_fn=prediction_input_fn)\n predictions = np.array([item['predictions'][0] for item in predictions])\n\n # Compute loss.\n root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(predictions, targets))\n # Occasionally print the current loss.\n print(\" period %02d : %0.2f\" % (period, root_mean_squared_error))\n # Add the loss metrics from this period to our list.\n 
root_mean_squared_errors.append(root_mean_squared_error)\n # Finally, track the weights and biases over time.\n # Apply some math to ensure that the data and line are plotted neatly.\n y_extents = np.array([0, sample[my_label].max()])\n\n weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]\n bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')\n\n x_extents = (y_extents - bias) / weight\n x_extents = np.maximum(np.minimum(x_extents,\n sample[my_feature].max()),\n sample[my_feature].min())\n y_extents = weight * x_extents + bias\n plt.plot(x_extents, y_extents, color=colors[period])\n\n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.subplot(1, 2, 2)\n plt.ylabel('RMSE')\n plt.xlabel('Periods')\n plt.title(\"Root Mean Squared Error vs. Periods\")\n plt.tight_layout()\n plt.plot(root_mean_squared_errors)\n plt.show()\n\n # Output a table with calibration data.\n calibration_data = pd.DataFrame()\n calibration_data[\"predictions\"] = pd.Series(predictions)\n calibration_data[\"targets\"] = pd.Series(targets)\n display.display(calibration_data.describe())\n\n print(\"Final RMSE (on training data): %0.2f\" % root_mean_squared_error)\n\n# RSME 166\n# train_model(\n# learning_rate=0.00001,\n# steps=1000,\n# batch_size=1000\n# )\n\n# RSME 176\ntrain_model(\n learning_rate=0.00002,\n steps=1000,\n batch_size=5,\n input_feature=\"population\"\n)",
"# https://colab.research.google.com/notebooks/mlcc/first_steps_with_tensor_flow.ipynb\nfrom __future__ import print_function\n\nimport math\nimport os\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\nfrom utils.input_fn import my_input_fn\n\ndirname = os.path.dirname(__file__)\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\ncsv = os.path.join(dirname, '../datasets/california_housing_train.csv')\ncalifornia_housing_dataframe = pd.read_csv(csv, sep=\",\")\n\n# Randomizing the DataFrame\ncalifornia_housing_dataframe = california_housing_dataframe.reindex(\n np.random.permutation(california_housing_dataframe.index)\n)\ncalifornia_housing_dataframe[\"median_house_value\"] /= 1000.0\n\n# print(california_housing_dataframe.describe())\n\n# Define the input feature: total_rooms.\nmy_feature = california_housing_dataframe[[\"total_rooms\"]]\n\n# Configure a numeric feature column for total_rooms.\nfeature_columns = [tf.feature_column.numeric_column(\"total_rooms\")]\n\n# Define the label.\ntargets = california_housing_dataframe[\"median_house_value\"]\n\n# Use gradient descent as the optimizer for training the model.\nmy_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0000001)\nmy_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n\n# Configure the linear regression model with our feature columns and optimizer.\n# Set a learning rate of 0.0000001 for Gradient Descent.\nlinear_regressor = tf.estimator.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=my_optimizer\n)\n\nlinear_regressor.train(\n input_fn = lambda:my_input_fn(my_feature, targets),\n steps=100\n)\n\n# Evaluating the model\n\n# Create an input function for predictions.\n# Note: Since we're making just one prediction for each example, we don't\n# need to repeat or shuffle the data here.\nprediction_input_fn = lambda: my_input_fn(my_feature, targets, num_epochs=1, shuffle=False)\n\n# Call predict() on the linear_regressor to make predictions.\npredictions = linear_regressor.predict(input_fn=prediction_input_fn)\n\n# Format predictions as a NumPy array, so we can calculate error metrics.\npredictions = np.array([item['predictions'][0] for item in predictions])\n\n# Print Mean Squared Error and Root Mean Squared Error.\nmean_squared_error = metrics.mean_squared_error(predictions, targets)\nroot_mean_squared_error = math.sqrt(mean_squared_error)\nprint(\"Mean Squared Error (on training data): %0.3f\" % mean_squared_error)\nprint(\"Root Mean Squared Error (on training data): %0.3f\" % root_mean_squared_error)\n\nmin_house_value = california_housing_dataframe[\"median_house_value\"].min()\nmax_house_value = california_housing_dataframe[\"median_house_value\"].max()\nmin_max_difference = max_house_value - min_house_value\n\nprint(\"Min. Median House Value: %0.3f\" % min_house_value)\nprint(\"Max. Median House Value: %0.3f\" % max_house_value)\nprint(\"Difference between Min. 
and Max.: %0.3f\" % min_max_difference)\nprint(\"Root Mean Squared Error: %0.3f\" % root_mean_squared_error)\n\n# Reducing model error\n\ncalibration_data = pd.DataFrame()\ncalibration_data[\"predictions\"] = pd.Series(predictions)\ncalibration_data[\"targets\"] = pd.Series(targets)\nprint(calibration_data.describe())\n\nsample = california_housing_dataframe.sample(n=300)\n\n# Get the min and max total_rooms values.\nx_0 = sample[\"total_rooms\"].min()\nx_1 = sample[\"total_rooms\"].max()\n\n# Retrieve the final weight and bias generated during training.\nweight = linear_regressor.get_variable_value('linear/linear_model/total_rooms/weights')[0]\nbias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')\n\n# Get the predicted median_house_values for the min and max total_rooms values.\ny_0 = weight * x_0 + bias\ny_1 = weight * x_1 + bias\n\n# Plot our regression line from (x_0, y_0) to (x_1, y_1).\nplt.plot([x_0, x_1], [y_0, y_1], c='r')\n\n# Label the graph axes.\nplt.ylabel(\"median_house_value\")\nplt.xlabel(\"total_rooms\")\n\n# Plot a scatter plot from our data sample.\nplt.scatter(sample[\"total_rooms\"], sample[\"median_house_value\"])\n\n# Display graph.\nplt.show()\n"
] |
[
[
"pandas.Series",
"matplotlib.cm.coolwarm",
"numpy.linspace",
"tensorflow.contrib.estimator.clip_gradients_by_norm",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"tensorflow.logging.set_verbosity",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.estimator.LinearRegressor",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.feature_column.numeric_column",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"numpy.random.permutation",
"matplotlib.pyplot.xlabel"
],
[
"pandas.read_csv",
"pandas.Series",
"matplotlib.pyplot.scatter",
"tensorflow.estimator.LinearRegressor",
"tensorflow.contrib.estimator.clip_gradients_by_norm",
"sklearn.metrics.mean_squared_error",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"tensorflow.train.GradientDescentOptimizer",
"numpy.random.permutation",
"tensorflow.logging.set_verbosity",
"tensorflow.feature_column.numeric_column",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
sequence-dev/landlab
|
[
"a84fbf67a46de08bf8b6758bb316bff3423e746c"
] |
[
"landlab/io/tests/test_read_esri_ascii.py"
] |
[
"#! /usr/bin/env python\n\"\"\"\nUnit tests for landlab.io.esri_ascii module.\n\"\"\"\nimport os\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nfrom six import StringIO\n\nfrom landlab.io import read_esri_ascii, read_asc_header\nfrom landlab.io import (MissingRequiredKeyError, KeyTypeError, DataSizeError,\n BadHeaderLineError, KeyValueError, \n MismatchGridDataSizeError)\nfrom landlab import RasterModelGrid\n\n\n_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')\n\n\ndef test_hugo_read_file_name():\n (grid, field) = read_esri_ascii(os.path.join(_TEST_DATA_DIR,\n 'hugo_site.asc'))\n\n assert isinstance(grid, RasterModelGrid)\n\n assert field.size == 55 * 76\n assert field.shape == (55 * 76, )\n\n\ndef test_hugo_read_file_like():\n with open(os.path.join(_TEST_DATA_DIR, 'hugo_site.asc')) as asc_file:\n (grid, field) = read_esri_ascii(asc_file)\n\n assert isinstance(grid, RasterModelGrid)\n\n assert field.size == 55 * 76\n assert field.shape == (55 * 76, )\n\n\ndef test_hugo_reshape():\n with open(os.path.join(_TEST_DATA_DIR, 'hugo_site.asc')) as asc_file:\n (grid, field) = read_esri_ascii(asc_file, reshape=True)\n\n assert isinstance(grid, RasterModelGrid)\n\n assert field.shape == (55, 76)\n\n\ndef test_4x3_read_file_name():\n (grid, field) = read_esri_ascii(os.path.join(_TEST_DATA_DIR,\n '4_x_3.asc'))\n\n assert isinstance(grid, RasterModelGrid)\n\n assert isinstance(field, np.ndarray)\n assert_array_equal(field,\n np.array([9., 10., 11.,\n 6., 7., 8.,\n 3., 4., 5.,\n 0., 1., 2.]))\n\n\ndef test_4x3_read_file_like():\n with open(os.path.join(_TEST_DATA_DIR, '4_x_3.asc')) as asc_file:\n (grid, field) = read_esri_ascii(asc_file)\n\n assert isinstance(grid, RasterModelGrid)\n\n assert_array_equal(field,\n np.array([9., 10., 11.,\n 6., 7., 8.,\n 3., 4., 5.,\n 0., 1., 2.]))\n\n\ndef test_4x3_shape_mismatch():\n asc_file = StringIO(\n \"\"\"\nnrows 4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n1. 2. 3. 4.\n5. 6. 7. 8.\n9. 10. 11. 12.\n \"\"\")\n (grid, field) = read_esri_ascii(asc_file)\n assert field.size == 12\n\n asc_file = StringIO(\n \"\"\"\nnrows 4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12.\n \"\"\")\n (grid, field) = read_esri_ascii(asc_file)\n assert field.size == 12\n\n\ndef test_4x3_size_mismatch():\n asc_file = StringIO(\n \"\"\"\nnrows 4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n1. 2. 3. 4. 5. 6. 7. 8. 9. 10.\n \"\"\")\n with pytest.raises(DataSizeError):\n read_esri_ascii(asc_file)\n \ndef test_grid_data_size_mismatch():\n asc_file = StringIO(\n \"\"\"\nnrows 4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 
12.\n \"\"\")\n rmg = RasterModelGrid((10,10),10.)\n with pytest.raises(MismatchGridDataSizeError):\n read_esri_ascii(asc_file, grid=rmg) \n\n\ndef test_header_missing_required_key():\n asc_file = StringIO(\n \"\"\"\nnrows 4\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n \"\"\")\n with pytest.raises(MissingRequiredKeyError):\n read_asc_header(asc_file)\n\n\ndef test_header_unknown_key():\n asc_file = StringIO(\n \"\"\"\nnrows 4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\ninvalid_key 1\n \"\"\")\n with pytest.raises(BadHeaderLineError):\n read_asc_header(asc_file)\n\n\ndef test_header_missing_value():\n asc_file = StringIO(\n \"\"\"\nnrows 4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize\nNODATA_value -9999\ninvalid_key 1\n \"\"\")\n with pytest.raises(BadHeaderLineError):\n read_asc_header(asc_file)\n\n\ndef test_header_bad_values():\n asc_file = StringIO(\n \"\"\"\nnrows -4\nncols 3\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n \"\"\")\n with pytest.raises(KeyValueError):\n read_asc_header(asc_file)\n\n\ndef test_header_missing_mutex_key():\n asc_file = StringIO(\n \"\"\"\nncols 3\nnrows 4\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n \"\"\")\n with pytest.raises(MissingRequiredKeyError):\n read_asc_header(asc_file)\n\n\ndef test_header_mutex_key():\n asc_file = StringIO(\n \"\"\"\nncols 3\nnrows 4\nxllcenter 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n \"\"\")\n header = read_asc_header(asc_file)\n assert header['xllcenter'] == 1.\n with pytest.raises(KeyError):\n header['xllcorner']\n\n asc_file = StringIO(\n \"\"\"\nncols 3\nnrows 4\nxllcorner 1.\nyllcorner 2.\ncellsize 10.\nNODATA_value -9999\n \"\"\")\n header = read_asc_header(asc_file)\n assert header['xllcorner'] == 1.\n with pytest.raises(KeyError):\n header['xllcenter']\n\n\ndef test_header_missing_optional():\n asc_file = StringIO(\n \"\"\"\nncols 3\nnrows 4\nxllcenter 1.\nyllcorner 2.\ncellsize 10.\n \"\"\")\n header = read_asc_header(asc_file)\n with pytest.raises(KeyError):\n header['nodata_value']\n\n\ndef test_header_case_insensitive():\n asc_file = StringIO(\n \"\"\"\nnCoLs 3\nnrows 4\nXllcenter 1.\nYLLCORNER 2.\nCELLSIZE 10.\nNODATA_value -999\n \"\"\")\n header = read_asc_header(asc_file)\n for key in ['ncols', 'nrows', 'xllcenter', 'yllcorner', 'cellsize',\n 'nodata_value']:\n assert key in header\n\n\ndef test_header_wrong_type():\n asc_file = StringIO(\n \"\"\"\nnCoLs 3.5\nnrows 4\nXllcenter 1.\nYLLCORNER 2.\nCELLSIZE 10.\nNODATA_value -999\n \"\"\")\n with pytest.raises(KeyTypeError):\n read_asc_header(asc_file)\n\n\ndef test_name_keyword():\n (grid, field) = read_esri_ascii(os.path.join(_TEST_DATA_DIR,\n '4_x_3.asc'),\n name='air__temperature')\n\n assert isinstance(grid, RasterModelGrid)\n\n assert isinstance(field, np.ndarray)\n assert_array_equal(field,\n np.array([9., 10., 11.,\n 6., 7., 8.,\n 3., 4., 5.,\n 0., 1., 2.]))\n assert_array_almost_equal(grid.at_node['air__temperature'], field)\n assert grid.at_node['air__temperature'] is field\n \ndef test_halo_keyword():\n (grid, field) = read_esri_ascii(os.path.join(_TEST_DATA_DIR, \\\n '4_x_3.asc'), \\\n halo=1)\n \n assert isinstance(grid, RasterModelGrid)\n\n assert isinstance(field, np.ndarray)\n assert_array_equal(field,\n np.array([-9999., -9999., -9999., -9999., -9999., \n -9999., 9., 10., 11., -9999.,\n -9999., 6., 7., 8., -9999.,\n -9999., 3., 4., 5., -9999.,\n -9999., 0., 1., 2., -9999.,\n -9999., -9999., -9999., -9999., -9999.]))\n \ndef test_halo_keyword_no_nodata_value():\n 
(grid, field) = read_esri_ascii(os.path.join(_TEST_DATA_DIR, \\\n '4_x_3_no_nodata_value.asc'), \\\n halo=1)\n \n assert isinstance(grid, RasterModelGrid)\n\n assert isinstance(field, np.ndarray)\n assert_array_equal(field,\n np.array([-9999., -9999., -9999., -9999., -9999., \n -9999., 9., 10., 11., -9999.,\n -9999., 6., 7., 8., -9999.,\n -9999., 3., 4., 5., -9999.,\n -9999., 0., 1., 2., -9999.,\n -9999., -9999., -9999., -9999., -9999.]))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.array",
"numpy.testing.assert_array_almost_equal"
]
] |
jmcmahon443/nvidia-jetson-competition
|
[
"3e13841f02b7a67813d1437a91cf04be4674dcdd"
] |
[
"jetson_ws/src/beat_makers/beat_detector.py"
] |
[
"#!/usr/bin/env python\nfrom __future__ import division\n\nimport time, sys\nimport aubio, pyaudio\nimport rospy\nimport numpy as np\nfrom beat_msgs.msg import Beat\n\nRATE = 32000 #44100\n\n\nclass BeatMaker(object):\n LIVE=1\n OFFLINE=0\n\n source=[]\n rate=10 #calcualte by win_size/sampling rate: 512/44100 ~ 11.6ms\n def __init__(self, lvl):\n #ros\n #name=rospy.get_param('~name', 'beat_detector_node')\n #topic=rospy.get_param('~topic', 'beats')\n\n\n rospy.init_node(\"detector\" , log_level=lvl, anonymous=True)\n\n self.beat_pub = rospy.Publisher(\"out\", Beat, queue_size=10)\n # and use the implemented Models array here\n\n\n\n def audio_init(self, source, samplerate, win_s, hop_s):\n #pyaudio init.\n self.audio = pyaudio.PyAudio()\n self.audio_format = pyaudio.paFloat32\n self.fpb = hop_s\n n_channels = 1\n\n ## well, one line of ros logic here\n self.r = rospy.Rate(RATE//win_s) # 50 Hz\n # initialize ros beat_msgs self.msg=Beat()\n self.msg=Beat()\n self.msg.beat=True\n\n # select aubio source\n if (source == \"live\"): # or source\n print(\"Tapping to the live input\")\n self.mode=BeatMaker.LIVE\n samplerate=RATE\n self.btempo = aubio.tempo(\"default\", win_s, hop_s, samplerate)\n\n self.stream = self.audio.open(format=self.audio_format, channels=n_channels, rate=samplerate,\n input=True, frames_per_buffer=self.fpb,\n stream_callback=self.mic_callback)\n\n else:\n print(\"Tapping to the audio file\")\n self.mode=BeatMaker.OFFLINE\n self.source = aubio.source(source, samplerate, hop_s)\n samplerate = self.source.samplerate\n self.btempo = aubio.tempo(\"default\", win_s, hop_s, samplerate)\n\n self.stream = self.audio.open(format=self.audio_format, channels=n_channels, rate=samplerate,\n output=True, frames_per_buffer=self.fpb,\n stream_callback=self.file_callback)\n self.click = 0.7 * np.sin(2. 
* np.pi * np.arange(hop_s) / hop_s * samplerate / 3000.)\n # create aubio tempo detection\n\n # create a simple click sound\n\n\n#frames_per_buffer\n\n\n\n\n def run(self):\n self.stream.start_stream()\n while not rospy.is_shutdown() and self.stream.is_active():\n self.r.sleep()\n\n def pub(self):\n # is assigning a member field and just updating stamp faster?\n self.msg.mark=rospy.Time.now()\n self.beat_pub.publish(self.msg)\n\n\n def file_callback(self, _in_data, _frame_count, _time_info, _status):\n samples, read = self.source()\n #print(\"s \",len(samples)) # len =512, floats\n #print(\"read \",read) # same with hopsize\n is_beat = self.btempo(samples)\n if is_beat:\n samples += self.click\n self.pub()\n #print('tick') # for debugging, don'T pring in cb\n\n audiobuf = samples.tobytes()\n if read < hop_s:\n return (audiobuf, pyaudio.paComplete)\n return (audiobuf, pyaudio.paContinue)\n\n\n def mic_callback(self, _in_data, _frame_count, _time_info, _status):\n audio_data = np.fromstring(_in_data, dtype=np.float32)\n is_beat = self.btempo(audio_data)\n if is_beat:\n #samples += click\n self.pub() # avoid print in audio callback\n #audiobuf = samples.tobytes()\n return (audio_data, pyaudio.paContinue)\n\n\ndef parse():\n\n source = rospy.get_param('source', 'live')\n samplerate = int(rospy.get_param('rate', '44100'))\n\n # if len(sys.argv) < 2:\n # return source, samplerate\n # print(sys.argv)\n #\n # source = sys.argv[1]\n #\n #\n # if len(sys.argv) > 2: samplerate = int(sys.argv[2])\n\n return source, samplerate\n\n\nif __name__ == '__main__':\n try:\n # Apeerantly roslaunch prepends it's own argv[i]s, so lets ditch trying to read both\n\n # if len(sys.argv) > 3 and sys.argv[3]:\n # lvl = rospy.DEBUG\n # else:\n lvl = rospy.get_param('log_level', rospy.INFO)\n\n win_s = 1024 # fft size\n hop_s = win_s // 2\n\n bmk = BeatMaker(lvl)\n source_, sample_rate = parse()\n\n bmk.audio_init(source_, sample_rate, win_s, hop_s)\n bmk.run()\n\n except rospy.ROSInterruptException: pass\n"
] |
[
[
"numpy.arange",
"numpy.fromstring"
]
] |
SACGF/variantgrid
|
[
"515195e2f03a0da3a3e5f2919d8e0431babfd9c9"
] |
[
"snpdb/management/commands/delete_unused_variants.py"
] |
[
"import numpy as np\nfrom django.core.management.base import BaseCommand\n\nfrom annotation.models import AnnotationRangeLock, VariantAnnotation\nfrom snpdb.models import Variant, VariantZygosityCount\n\n\nclass Command(BaseCommand):\n \"\"\"\n Until 210622 - (PythonKnownVariantsImporter v.16) we used to insert a reference variant (alt='=') for each ALT\n We also didn't have a way to delete variants that are no longer referenced\n \"\"\"\n def add_arguments(self, parser):\n # Usually an annotation range lock is 100k, so you'd expect 50k ref variants in there.\n # So steps=20 will look in a 5k range and probably delete 2.5k Variant and 2.5k VariantZygosityCount\n parser.add_argument('--steps', type=int, default=20, required=False,\n help=\"Number of steps to take in between AnnotationRangeLock regions (which are ~100k)\")\n\n def handle(self, *args, **options):\n # We want to do this in small batches - so use the variant annotation range locks which are all approx the same\n # size (even if a big gap between IDs)\n # Variants from different builds are mixed up together so just do all builds\n # Any overlaps will be quick as not much to delete\n steps = options[\"steps\"]\n\n arl_qs = AnnotationRangeLock.objects.all()\n total = arl_qs.count()\n print(f\"Deleting unused variants in {total} steps...\")\n total_deleted = 0\n for i, range_lock in enumerate(arl_qs.order_by(\"max_variant\")):\n perc = 100 * i / total\n print(f\"{perc:.2f}% done - doing step: {range_lock}\")\n\n linspace = np.linspace(range_lock.min_variant_id, range_lock.max_variant_id, steps + 1).astype(int)\n for s in range(steps):\n start = linspace[s]\n end = linspace[s+1]\n print(f\"{start} - {end} ({end-start})\")\n # Skip the range lock min/max variant as that's protected so can't delete anyway\n # Also need to avoid deleting those that are are within range in another build\n variants_in_range_qs = Variant.objects.filter(pk__gt=start, pk__lt=end)\n unused_variants_qs = variants_in_range_qs.filter(classification__isnull=True,\n clinvar__isnull=True,\n varianttag__isnull=True,\n variantallele__isnull=True,\n cohortgenotype__isnull=True,\n variantcollectionrecord__isnull=True,\n min_variant__isnull=True,\n max_variant__isnull=True)\n variants_deleted = unused_variants_qs._raw_delete(unused_variants_qs.db)\n print(f\"{variants_deleted=}\")\n vzc_qs = VariantZygosityCount.objects.filter(variant_id__gt=start, variant_id__lt=end)\n vzc_qs = vzc_qs.exclude(variant__in=variants_in_range_qs)\n zygosity_count_deleted = vzc_qs._raw_delete(vzc_qs.db)\n print(f\"{zygosity_count_deleted=}\")\n\n va_qs = VariantAnnotation.objects.filter(variant_id__gt=start, variant_id__lt=end)\n va_qs = va_qs.exclude(variant__in=variants_in_range_qs)\n annotation_deleted = va_qs._raw_delete(va_qs.db)\n print(f\"{annotation_deleted=}\")\n total_deleted += variants_deleted\n\n print(f\"Total deleted: {total_deleted}\")\n"
] |
[
[
"numpy.linspace"
]
] |
nvinard/seismicToolBox
|
[
"f304a87ab61ed6c0c2ef730be01421a937ce5b22"
] |
[
"seismicToolBox.py"
] |
[
"\"\"\"\r\nA python module for plotting and manipulating seismic data and headers,\r\nkirchhoff migration and more. Contains the following functions\r\n\r\nload_header : load mat file header\r\nload_segy : load segy dataset\r\nsorthdr : sort seismic header\r\nsortdata : sort seismic data\r\nselectCMP : select a CMP gather with respect to its midpoint position\r\nanalysefold : Positions of gathers and their folds\r\nimageseis : Interactive seismic image\r\nwiggle : Seismic wiggle plot\r\nplothdr : Plots header data\r\nsemblanceWiggle : Interactive semblance plot for velocity analysis\r\napply_nmo : Applies nmo correction\r\nnmo_v : Applies nmo to single CMP gather for constant velocity\r\nnmo_vlog : Applied nmo to single CMP gather for a 1D time-velocity log\r\nnmo_stack : Generates a stacked zero-offset section\r\nstackplot : Stacks all traces in a gather and plots the stacked trace\r\ngeneratevmodel2 : Generates a 2D velocity model\r\nkirk_mig : Kirkhoff migration\r\ntime2depth_trace : time-to-depth conversion for a single trace in time domain\r\ntime2depth_section : time-to-depth conversion for a seismic section in time domain\r\nagc : applies automatic gain control for a given dataset.\r\n\r\n(C) Nicolas Vinard and Musab al Hasani, 2020, v.0.0.7\r\n\r\n- 9.9.2020 added load_header and load_segy\r\n- 31.01.2020 added nth-percentile clipping\r\n- 16.01.2020 Added clipping in wiggle function\r\n- added agc functions\r\n- Fixed semblanceWiggle sorting error\r\n\r\n\r\nMany of the functions here were developed by CREWES and were originally written in MATLAB.\r\nIn the comments of every function we translated from CREWES we refer to its function name\r\nand the CREWES Matlab library at www.crewes.org.\r\nOther functions were translated from MATLAB to Python and originally written by Max Holicki\r\nfor the course \"Geophyiscal methods for subsurface characterization\" tought at TU Delft.\r\n\r\n\"\"\"\r\n\r\nimport struct, sys\r\nimport numpy as np\r\nimport matplotlib\r\nimport copy\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.widgets import Slider, Button\r\nimport sys\r\nfrom typing import Tuple\r\n#from tqdm import tnrange\r\n#from utils import segypy\r\nfrom scipy.io import loadmat\r\n\r\n\r\n### LOAD HEADER OR SEGY DATA ####\r\n\r\ndef load_header(file_path)->np.ndarray:\r\n \"\"\"\r\n\r\n header = load_header(path_to_file)\r\n\r\n Parameters\r\n ----------\r\n file_path: string\r\n Path to mat file\r\n\r\n Returns\r\n -------\r\n SH: np.ndarray of shape (5, # traces)\r\n Header containing information of shot, receiver, CMP and offset positions\r\n\r\n \"\"\"\r\n SH = loadmat(file_path)\r\n\r\n return SH['H_SHT']\r\n\r\ndef load_segy(file_path)->tuple([np.ndarray, np.ndarray, np.ndarray]):\r\n \"\"\"\r\n\r\n Data, SH, STH = load_segy(path_to_file)\r\n\r\n Parameters\r\n ----------\r\n file_path: string\r\n Path to mat file\r\n\r\n Returns\r\n -------\r\n Data: np.ndarray of shape (# time samples, # traces)\r\n Seismic data\r\n SH: np.ndarray of shape (5, # traces)\r\n Header containing information of shot, receiver, CMP and offset positions\r\n STH: np.ndarray\r\n Trace header\r\n\r\n \"\"\"\r\n\r\n segyDataset = segypy.readSegy(file_path)\r\n Data = segyDataset[0]\r\n SH = segyDataset[2]\r\n STH = segyDataset[1]\r\n\r\n return Data, SH, STH\r\n\r\n\r\n##################################################\r\n########## HEADER AND DATA MANIPULATION ##########\r\n##################################################\r\n\r\ndef sorthdr(H_SHT: np.ndarray, sortkey1: int, sortkey2 = 
None)->np.ndarray:\r\n \"\"\"\r\n sorted_header = sorthdr(H_SHT, sortkey1, sortkey2 = None)\r\n\r\n Sorts the input header according to the sortkey1 value as primary sort,\r\n and within sortykey1 the header is sorted according to sortkey2.\r\n\r\n Valid values for sortkey are:\r\n 1 = Common Shot\r\n 2 = Common Receiver\r\n 3 = Common Midpoint (CMP)\r\n 4 = Common Offset\r\n\r\n Parameters\r\n ----------\r\n H_SHT: np.ndarray of shape (5, # traces)\r\n Header containing information of shot, receiver, CMP and offset positions\r\n sortkey1: int\r\n Primary sort key by which to sort the header\r\n\r\n Optional parameters\r\n -------------------\r\n sortkey2: int\r\n Secondary sort key by which to sort the header\r\n\r\n Returns\r\n -------\r\n H_SRT: np.ndarray of shape (5, # traces)\r\n Sorted header\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n if sortkey2 is None:\r\n if sortkey1 == 4:\r\n sortkey2 = 3\r\n if sortkey1 == 1:\r\n sortkey2 = 2\r\n if sortkey1 == 2:\r\n sortkey2 = 1\r\n if sortkey1 == 3:\r\n sortkey2 = 4\r\n\r\n\r\n index_sort = np.lexsort((H_SHT[sortkey2, :], H_SHT[sortkey1, :]))\r\n\r\n H_SRT = H_SHT[:, index_sort]\r\n\r\n return H_SRT\r\n\r\n\r\ndef sortdata(\r\n data: np.ndarray,\r\n H_SHT: np.ndarray,\r\n sortkey1: int,\r\n sortkey2 = None\r\n )->tuple([np.ndarray, np.ndarray]):\r\n\r\n \"\"\"\r\n sorted_data, sorted_header = sortdata(data, H_SHT, sortkey1, sortkey2 = None):\r\n\r\n Sorts data using the data's header. Sorting order is defined according to the\r\n sortkey1 value as primary sort, and within sortykey1 it is is sorted\r\n again according to sortkey2.\r\n\r\n Valid values for sortkey are:\r\n\r\n 1 = Common Shot\r\n 2 = Common Receiver\r\n 3 = Common Midpoint (CMP)\r\n 4 = Common Offset\r\n\r\n Parameters\r\n ----------\r\n data: np.ndarray of shape (# time samples, # traces)\r\n The seismic data\r\n H_SHT: np.ndarray of shape (5, # traces)\r\n Header containing information of shot, receiver, CMP and offset positions\r\n\r\n Optional parameters\r\n -------------------\r\n sortkey2: int\r\n Secondary sort key by which to sort the header\r\n\r\n Returns\r\n -------\r\n sorted_data: np.ndarray of shape (# time samples, # traces)\r\n Sorted seismic data\r\n sorted_header: np.ndarray of shape (5, # traces)\r\n Sorted header\r\n\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n if sortkey2 is None:\r\n if sortkey1 == 4:\r\n sortkey2 = 3\r\n if sortkey1 == 1:\r\n sortkey2 = 2\r\n if sortkey1 == 2:\r\n sortkey2 = 1\r\n if sortkey1 == 3:\r\n sortkey2 = 4\r\n\r\n ns, n_traces = data.shape\r\n\r\n # Put header on top of data\r\n headered_data = np.append(H_SHT, data, axis=0)\r\n index_sort = np.lexsort((headered_data[sortkey2, :], headered_data[sortkey1, :]))\r\n sorted_headerData = headered_data[:, index_sort]\r\n sorted_data = sorted_headerData[len(H_SHT):, :]\r\n sorted_header = sorted_headerData[:len(H_SHT), :]\r\n\r\n return sorted_data, sorted_header\r\n\r\n\r\ndef selectCMP(\r\n CMPsorted: np.ndarray,\r\n H_CMP: np.ndarray,\r\n midpnt: float\r\n )->tuple([np.ndarray, np.ndarray]):\r\n\r\n \"\"\"\r\n\r\n CMPGather, H_CMPGather = selectCMP(CMPsorted, H_CMP, midpnt):\r\n\r\n This function selects a CMP gather according to its midpoint position.\r\n Midpoints can be found with the function analysefold.\r\n\r\n Parameters\r\n ----------\r\n CMPsorted: np.ndarray of shape (# time samples, # traces)\r\n CMP sorted seismic data\r\n H_CMP: np.ndarray of 
shape (5, # traces)\r\n CMP sorted header\r\n midpnt: float\r\n Midpoint of the CMP-gather you want to plot\r\n\r\n Returns\r\n -------\r\n CMPGather: np.ndarray\r\n Selected CMP-gather\r\n H_CMPgather: np.ndarray\r\n Header of selectred CMP gather\r\n\r\n see also sortdata, anaylsefold\r\n\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n # Read the amount of time-samples and traces from the shape of the datamatrix\r\n nt, ntr = CMPsorted.shape\r\n\r\n # initialise arrays\r\n CMPgather = np.empty(CMPsorted.shape)\r\n H_CMPgather = np.empty(H_CMP.shape)\r\n\r\n # CMP-gather trace counter initialisation:\r\n # l is the number of traces in a CMP gather\r\n l = 0\r\n\r\n # Scan the CMP-sorted dataset for traces with the correct midpoint and put those traces in a cmpgather.\r\n\r\n for i in range(0,ntr):\r\n\r\n if H_CMP[3,i] == midpnt:\r\n\r\n CMPgather[:,l] = CMPsorted[:,i]\r\n H_CMPgather[:,l] = H_CMP[:,i]\r\n l = l + 1\r\n\r\n return CMPgather[:,:l], H_CMPgather[:,:l]\r\n\r\n\r\n\r\n##################################################\r\n########## HEADER AND DATA VISUALIZATION #########\r\n##################################################\r\n\r\ndef analysefold(H_SHT: np.ndarray, sortkey: int\r\n )->tuple([np.ndarray, np.ndarray]):\r\n\r\n \"\"\"\r\n positions, folds = analysefold(H_SHT, sortkey)\r\n\r\n This function gives the positions of the gathers, such as CMP's or\r\n Common-Offset gathers, as well as their folds. Furthermore, a\r\n crossplot is generated.\r\n\r\n Analysefold analyzes the fold of a dataset according to the value of sortkey:\r\n 1 = Common Shot\r\n 2 = Common Receiver\r\n 3 = Common Midpoint (CMP)\r\n 4 = Common Offset\r\n\r\n Parameters\r\n ----------\r\n H_SHT: np.ndarray of shape (5, # traces)\r\n Shot-sorted data header\r\n sortkey: int\r\n Sorting key\r\n\r\n Returns\r\n -------\r\n positions: np.ndarray\r\n Gather positions\r\n folds: np.ndarray\r\n Gather-folds\r\n\r\n\r\n Translated to Python from Matlab by Musab al Hasani and Nicolas Vinard, 2019\r\n\r\n \"\"\"\r\n\r\n # Read the amount of time-samples and traces from the shape of the datamatrix\r\n nt, ntr = H_SHT.shape\r\n\r\n # Sort the header\r\n H_SRT = sorthdr(H_SHT, sortkey)\r\n\r\n # Midpoint initialization, midpoint distance and fold\r\n gather_positions = H_SRT[1,0]\r\n gather_positions = np.array(gather_positions)\r\n gather_positions = np.reshape(gather_positions, (1,1))\r\n gather_folds = 1\r\n gather_folds = np.array(gather_folds)\r\n gather_folds = np.reshape(gather_folds, (1,1))\r\n\r\n # Gather trace counter initialization\r\n # l is the amount of traces in a gather\r\n l = 0\r\n\r\n # Distance counter initialization\r\n # m is the amount of distances in the sorted dataset\r\n m = 0\r\n\r\n for k in range(1, ntr):\r\n\r\n if H_SRT[sortkey, k] == gather_positions[m]:\r\n l = l + 1\r\n else:\r\n if m == 0:\r\n gather_folds[0,0] = l\r\n else:\r\n gather_folds = np.append(gather_folds, l)\r\n\r\n m = m + 1\r\n gather_positions = np.append(gather_positions, H_SRT[sortkey, k])\r\n l = 0\r\n\r\n gather_folds = gather_folds+1\r\n\r\n # Remove first superfluous entry in gather_positions\r\n gather_positions = gather_positions[1:]\r\n\r\n # Make a plot\r\n fig, ax = plt.subplots(figsize=(8,6))\r\n ax.plot(gather_positions, gather_folds, 'x')\r\n ax.set_title('Amount of traces per gather')\r\n ax.set_xlabel('gather-distance [m]')\r\n ax.set_ylabel('fold')\r\n fig.tight_layout()\r\n\r\n return gather_positions, gather_folds\r\n\r\ndef 
imageseis(DataO: np.ndarray, x=None, t=None, gain=1, perc=100):\r\n\r\n \"\"\"\r\n imageseis(Data, x=None, t=None, maxval=-1, gain=1, perc=100):\r\n\r\n This function generates a seismic image plot including interactive\r\n handles to apply a gain and a clip\r\n\r\n Parameters\r\n ----------\r\n Data: np.ndarray of shape (# time samples, # traces)\r\n Seismic data\r\n\r\n Optional parameters\r\n -------------------\r\n gain: float\r\n Apply simple gain\r\n x: np.ndarray of shape Data.shape[1]\r\n x-coordinates to Plot\r\n t: np.ndarray of shape Data.shap[0]\r\n t-axis to plot\r\n perc: float\r\n nth parcintile to be clipped\r\n\r\n Returns\r\n -------\r\n Seismic image\r\n\r\n Adapted from segypy (Thomas Mejer Hansen, https://github.com/cultpenguin/segypy/blob/master/segypy/segypy.py)\r\n\r\n Musab and Nicolas added interactive gain and clip, 2019\r\n\r\n\r\n \"\"\"\r\n\r\n # Make a copy of the original, so that it won't change the original one ouside the scope of the function\r\n Data = copy.copy(DataO)\r\n\r\n # calculate value of nth-percentile, when perc = 100, data won't be clipped.\r\n nth_percentile = np.abs(np.percentile(Data, perc))\r\n\r\n # clip data to the value of nth-percintile\r\n Data = np.clip(Data, a_min=-nth_percentile, a_max = nth_percentile)\r\n\r\n ns, ntraces = Data.shape\r\n maxval = -1\r\n Dmax = np.max(Data)\r\n maxval = -1*maxval*Dmax\r\n\r\n if t is None:\r\n t = np.arange(0, ns)\r\n tLabel = 'Sample number'\r\n else:\r\n t = t\r\n tc = t\r\n tLabel = 'Time [s]'\r\n if len(t)!=ns:\r\n print('Error: time array not of same length as number of time samples in data \\n Samples in data: {}, sample in input time array: {}'.format(ns, len(t)))\r\n sys.exit()\r\n\r\n if x is None:\r\n x = np.arange(0, ntraces) +1\r\n xLabel = 'Trace number'\r\n else:\r\n x = x\r\n xLabel = 'Distance [m]'\r\n if len(x)!=ntraces:\r\n print('Error: x array not of same length as number of trace samples in data \\n Samples in data: {}, sample in input x array: {}'.format(ns, len(t)))\r\n\r\n plt.subplots_adjust(left=0.25, bottom=0.3)\r\n img = plt.pcolormesh(x, t, Data*gain, vmin=-1*maxval, vmax=maxval, cmap='seismic', shading='auto')\r\n cb = plt.colorbar()\r\n plt.axis('auto')\r\n plt.xlabel(xLabel)\r\n plt.ylabel(tLabel)\r\n plt.gca().invert_yaxis()\r\n\r\n # Add interactice widgets\r\n # Defines position of the toolbars\r\n ax_cmax = plt.axes([0.25, 0.15, 0.5, 0.03])\r\n ax_cmin = plt.axes([0.25, 0.1, 0.5, 0.03])\r\n ax_gain = plt.axes([0.25, 0.05, 0.5, 0.03])\r\n\r\n s_cmax = Slider(ax_cmax, 'max clip ', 0, np.max(np.abs(Data)), valinit=np.max(np.abs(Data)))\r\n s_cmin = Slider(ax_cmin, 'min clip', -np.max(np.abs(Data)), 0, valinit=-np.max(np.abs(Data)))\r\n s_gain = Slider(ax_gain, 'gain', gain, 10*gain, valinit=gain)\r\n\r\n def update(val, s=None):\r\n _cmin = s_cmin.val/s_gain.val\r\n _cmax = s_cmax.val/s_gain.val\r\n img.set_clim([_cmin, _cmax])\r\n plt.draw()\r\n\r\n s_cmin.on_changed(update)\r\n s_cmax.on_changed(update)\r\n s_gain.on_changed(update)\r\n\r\n return img\r\n\r\ndef wiggle(\r\n DataO: np.ndarray,\r\n x=None,\r\n t=None,\r\n skipt=1,\r\n lwidth=.5,\r\n gain=1,\r\n typeD='VA',\r\n color='red',\r\n perc=100):\r\n\r\n \"\"\"\r\n wiggle(DataO, x=None, t=None, maxval=-1, skipt=1, lwidth=.5, gain=1, typeD='VA', color='red', perc=100)\r\n\r\n This function generates a wiggle plot of the seismic data.\r\n\r\n Parameters\r\n ----------\r\n DataO: np.ndarray of shape (# time samples, # traces)\r\n Seismic data\r\n\r\n Optional parameters\r\n -------------------\r\n x: 
np.ndarray of shape Data.shape[1]\r\n x-coordinates to Plot\r\n t: np.ndarray of shape Data.shap[0]\r\n t-axis to plot\r\n skipt: int\r\n Skip trace, skips every n-th trace\r\n ldwidth: float\r\n line width of the traces in the figure, increase or decreases the traces width\r\n typeD: string\r\n With or without filling positive amplitudes. Use type=None for no filling\r\n color: string\r\n Color of the traces\r\n perc: float\r\n nth parcintile to be clipped\r\n\r\n Returns\r\n -------\r\n Seismic wiggle plot\r\n\r\n Adapted from segypy (Thomas Mejer Hansen, https://github.com/cultpenguin/segypy/blob/master/segypy/segypy.py)\r\n\r\n\r\n \"\"\"\r\n # Make a copy of the original, so that it won't change the original one ouside the scope of the function\r\n Data = copy.copy(DataO)\r\n\r\n # calculate value of nth-percentile, when perc = 100, data won't be clipped.\r\n nth_percentile = np.abs(np.percentile(Data, perc))\r\n\r\n # clip data to the value of nth-percentile\r\n Data = np.clip(Data, a_min=-nth_percentile, a_max = nth_percentile)\r\n\r\n ns = Data.shape[0]\r\n ntraces = Data.shape[1]\r\n\r\n fig = plt.gca()\r\n ax = plt.gca()\r\n ntmax=1e+9 # used to be optinal\r\n\r\n if ntmax<ntraces:\r\n skipt=int(np.floor(ntraces/ntmax))\r\n if skipt<1:\r\n skipt=1\r\n\r\n if x is not None:\r\n x=x\r\n ax.set_xlabel('Distance [m]')\r\n else:\r\n x=range(0, ntraces)\r\n ax.set_xlabel('Trace number')\r\n\r\n if t is not None:\r\n t=t\r\n yl='Time [s]'\r\n else:\r\n t=np.arange(0, ns)\r\n yl='Sample number'\r\n\r\n dx = abs(x[1]-x[0])\r\n\r\n Dmax = np.nanmax(Data)\r\n maxval = np.abs(Dmax)\r\n\r\n for i in range(0, ntraces, skipt):\r\n\r\n # use copy to avoid truncating the data\r\n trace = copy.copy(Data[:, i])\r\n trace = Data[:, i]\r\n trace[0] = 0\r\n trace[-1] = 0\r\n traceplt = x[i] + gain * skipt * dx * trace / maxval\r\n traceplt = np.clip(traceplt, a_min=x[i]-dx, a_max=(dx+x[i]))\r\n\r\n ax.plot(traceplt, t, color=color, linewidth=lwidth)\r\n\r\n offset = x[i]\r\n\r\n if typeD=='VA':\r\n for a in range(len(trace)):\r\n if (trace[a] < 0):\r\n trace[a] = 0\r\n ax.fill_betweenx(t, offset, traceplt, where=(traceplt>offset), interpolate='True', linewidth=0, color=color)\r\n ax.grid(False)\r\n\r\n ax.set_xlim([x[0]-dx, x[-1]+dx])\r\n\r\n ax.invert_yaxis()\r\n ax.set_ylim([np.max(t), np.min(t)])\r\n ax.set_ylabel(yl)\r\n\r\ndef plothdr(Header: np.ndarray, trmin=None, trmax=None):\r\n\r\n \"\"\"\r\n plothdr(Header, trmin, trmax) - plots the header data\r\n\r\n Parameters\r\n ----------\r\n Header: np.ndarray\r\n Data header\r\n\r\n Optional parameters\r\n -------------------\r\n trmin: int\r\n Start with trace trmin\r\n trmax: int\r\n End with trace trmax, int\r\n\r\n Returns\r\n -------\r\n Figures:\r\n four plots:\r\n (1) shot position\r\n (2) CMP\r\n (3) receiver position\r\n (4) trace offset\r\n\r\n Translated to Python from Matlab by Musab al Hasani and Nicolas Vinard, 2019\r\n\r\n \"\"\"\r\n\r\n if trmin == None:\r\n trmin = 0\r\n\r\n if trmax == None:\r\n trmax = np.max(np.size(Header[0,:]))\r\n\r\n fig, ax = plt.subplots(2,2, figsize=(10,8), sharex=True)\r\n\r\n\r\n ind = np.array([2, 4, 3, 1])\r\n\r\n ax[0,0].plot(np.arange(trmin, trmax, 1), Header[1, trmin:trmax], 'x')\r\n ax[0,1].plot(np.arange(trmin, trmax, 1), Header[3, trmin:trmax], 'x')\r\n ax[1,0].plot(np.arange(trmin, trmax, 1), Header[2, trmin:trmax], 'x')\r\n ax[1,1].plot(np.arange(trmin, trmax, 1), Header[4, trmin:trmax], 'x')\r\n ax[0,0].set(title = 'shot positions', xlabel = 'trace number', ylabel = 'shot position [m]')\r\n 
ax[0,1].set(title = 'common midpoint positions (CMPs)', xlabel = 'trace number', ylabel = 'CMP [m]')\r\n ax[1,0].set(title = 'receiver positions', xlabel = 'trace number', ylabel = 'receiver position [m]')\r\n ax[1,1].set(title = 'trace offset', xlabel = 'trace number', ylabel = 'offset [m]')\r\n ax[0,0].xaxis.set_tick_params(which='both', labelbottom=True)\r\n ax[0,1].xaxis.set_tick_params(which='both', labelbottom=True)\r\n fig.tight_layout(pad=1)\r\n\r\n\r\n##################################################\r\n######### DATA ANALYSIS PRE-PROCESSING ###########\r\n##################################################\r\n\r\ndef semblanceWiggle(\r\n CMPgather: np.ndarray,\r\n H_CMPgather: np.ndarray,\r\n H,\r\n vmin:float,\r\n vmax:float,\r\n vstep:float\r\n )->tuple([np.ndarray, np.ndarray]):\r\n\r\n \"\"\"\r\n v_picks, t_picks = semblanceWiggle(CMPgather,TrcH,H,vmin,vmax,vstep):\r\n This funcion generates an interactive semblance plot for the velocity analysis.\r\n Picks are generated by left-clicking the semblance with the mouse. A red cross\r\n indicates the location of the picks. Picks can be removed by click in the middle.\r\n To end the picking press enter.\r\n\r\n Parameters\r\n ----------\r\n CMPgather: np.ndarray\r\n CMP gather\r\n H_CMPgather: np.ndarray\r\n CMP header with postional information\r\n H:\r\n Seismic data header\r\n vmin: float\r\n Minimum velocity in semblance analysis (m/s)\r\n vmax: float\r\n Maximum velocity in semblance analysis (m/s)\r\n vstep: float\r\n Velocity step between vmin and vmax (m/s)\r\n\r\n Returns\r\n -------\r\n v_picks: np.ndarray\r\n Picked velocities\r\n t_picks: np.ndarray\r\n Time picks at picked velocities\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n \"\"\"\r\n\r\n ExpSmooth=50 # used to be optinal argument\r\n\r\n # Create vectors\r\n x = H_CMPgather[4,:]\r\n t = np.arange(0,H['ns']*H['dt']*1e-6, H['dt']*1e-6)\r\n t2 = np.arange(0,H['ns']*H['dt']*1e-6, H['dt']*1e-6)\r\n v = np.arange(vmin,vmax+vstep,vstep)\r\n\r\n # Compute the squares\r\n xsq = np.square(x)\r\n tsq = np.square(t)\r\n vsq = np.square(v)\r\n\r\n # reshape for broadcasting\r\n xsq = xsq.reshape(1,len(xsq),1)\r\n tsq = tsq.reshape(len(tsq),1,1)\r\n vsq = vsq.reshape(1,1,len(vsq))\r\n\r\n t = t.reshape((len(t),1,1))\r\n x = x.reshape((1,len(x),1))\r\n v = v.reshape((1,1,len(v)))\r\n\r\n T = np.sqrt(tsq+xsq/vsq)\r\n\r\n # Interpolate data to NMO time and stack for each velocity\r\n tt = np.exp(-ExpSmooth*np.abs(np.subtract(t,t.T)))\r\n tt = tt.reshape(np.size(t), np.size(t))\r\n\r\n S = np.zeros((np.size(t),np.size(v))) # Preallocate semblance\r\n q = np.zeros((np.size(t),np.size(x))) # Preallocate temporary container\r\n b = np.zeros((np.size(t),1))\r\n\r\n for vi in range(0, np.size(v)):\r\n\r\n for j in range(0, np.size(x)):\r\n\r\n q[:,j] = np.interp(T[:,j,vi], t[:,0,0],CMPgather[:,j], left=0,right=0)\r\n\r\n r = np.sum(q,axis=1, keepdims=True)\r\n C = t*np.size(x)/np.sum(x**2)*x**2 / T\r\n C[np.isnan(C)] = 0\r\n\r\n # Three expressions for conventional semblance\r\n Crq = np.sum(tt*np.sum(r*q,axis=1,keepdims=True),axis=0,keepdims=True).T\r\n Crr = np.sum(tt*np.size(x)*r**2,axis=0,keepdims=True).T\r\n Cqq = np.sum(tt*np.sum(q**2,axis=1,keepdims=True),axis=0,keepdims=True).T\r\n\r\n normalS = Crq**2/(Crr*Cqq)\r\n Brq = np.sum(tt*np.sum(r*(C[:,:,vi]*q),axis=1,keepdims=True),axis=0,keepdims=True).T\r\n Brr = np.sum(tt*np.sum(r**2*C[:,:,vi],axis=1,keepdims=True),axis=0,keepdims=True).T\r\n Bqq = 
np.sum(tt*np.sum(C[:,:,vi]*(q**2),axis=1,keepdims=True),axis=0,keepdims=True).T\r\n\r\n # Minimize b\r\n A = Crr*Bqq+Cqq*Brr\r\n Rrq=Crq/(Crq-Brq)\r\n Rrr=Crr/(Crr-Brr)\r\n Rqq=Cqq/(Cqq-Bqq)\r\n\r\n ind = np.logical_or(\r\n np.logical_and(\r\n np.less(Rrr,Rrq), np.less(Rrq,Rqq)),\r\n np.logical_and(\r\n np.less(Rqq,Rrq), np.less(Rrq,Rrr)))\r\n\r\n ind = ind.astype(int)\r\n\r\n for ik in range(0, len(ind)-1):\r\n if ind[ik,0] == 1:\r\n b[ik] = (1-(2*Crq[ik,0]*Brr[ik,0]*Bqq[ik,0]-Brq[ik,0]*A[ik,0])/(2*Brq[ik,0]*Crr[ik,0]*Cqq[ik,0]-Crq[ik,0]*A[ik,0]))**(-1)\r\n elif ind[ik,0] == 0:\r\n b[ik] = Rrq[ik,0]\r\n\r\n ind2 = np.zeros((ind.shape)).astype(int)\r\n\r\n for ij in range(0,len(b)):\r\n if (b[ij,0] > 1 or b[ij,0] < 0):\r\n ind2[ij] = 1\r\n b[ij,0] = 0\r\n\r\n else:\r\n ind2[ij] = 0\r\n\r\n Wrq = (1-b)*Crq + b*Brq\r\n Wrr = (1-b)*Crr + b*Brr;\r\n Wqq = (1-b)*Cqq + b*Bqq;\r\n\r\n tmp = Wrq**2 / (Wrr*Wqq)\r\n S[:,vi] = tmp.reshape(len(tmp))\r\n\r\n Ind = np.argwhere(ind2)[:,0]\r\n s = Brq[Ind]**2/(Brr[Ind]*Bqq[Ind]);\r\n IndF=np.argwhere(Ind)\r\n\r\n IND = []\r\n\r\n for ik in range(0, len(s)):\r\n if S[ik,vi] > s[ik]:\r\n IND.append(1)\r\n else:\r\n IND.append(0)\r\n\r\n S[IndF[IND],vi]=s[IND]\r\n\r\n fig, ax = plt.subplots(nrows= 1, figsize=(4,10))\r\n ax.pcolormesh(v, t, S, shading='auto')\r\n ax.invert_yaxis()\r\n ax.set_xlabel('Velocity, m/s', fontsize=12)\r\n ax.set_ylabel('Time, s', fontsize=12)\r\n ax.set_title('Left-click: pick \\n - Middle-click: delete pick \\n - Enter: save picks ')\r\n fig.tight_layout(pad=2.0, h_pad=1.0)\r\n #plt.waitforbuttonpress(timeout=15)\r\n picks = plt.ginput(n=-1,timeout=15)\r\n plt.close()\r\n\r\n\r\n if not picks:\r\n sys.exit(\"No time-velocity picks selected. Closing.\")\r\n\r\n\r\n picks = np.asarray(picks)\r\n v_picks = picks[:,0]\r\n t_picks = picks[:,1]\r\n index_sort = np.argsort(t_picks)\r\n t_picks = t_picks[index_sort]\r\n v_picks = v_picks[index_sort]\r\n t_picks = np.insert(t_picks, 0, 0)\r\n v_picks = np.insert(v_picks, 0, v_picks[0])\r\n t_picks = np.insert(t_picks, len(t_picks), t[-1])\r\n v_picks = np.insert(v_picks, len(v_picks), v_picks[-1])\r\n\r\n return v_picks, t_picks\r\n\r\n\r\ndef apply_nmo(\r\n CMPgather: np.ndarray,\r\n H_CMPgather: np.ndarray,\r\n H,\r\n t: np.ndarray,\r\n v: np.ndarray,\r\n smute=0\r\n )->np.ndarray:\r\n\r\n \"\"\"\r\n NMOedCMP = apply_nmo(CMPgather, H_CMPgather, H, t, v, smute=0)\r\n\r\n This function applies NMO to a single CMP-gather given a 1D velocity-time log\r\n Addtionally it outputs three plots showing the log, the CMP-gather before and after NMO\r\n\r\n Parameters\r\n ----------\r\n CMPgather gather: np.ndarray\r\n CMP gather\r\n H_CMPgather: np.ndarray\r\n Header of CMP-gather\r\n H:\r\n Seismic data header\r\n t: np.ndarray of shape (#picks,)\r\n Time of velocity picks in seconds\r\n v: np.ndarray of shape (#picks,)\r\n Velocity picks in m/s\r\n\r\n Optinal parameters\r\n ------------------\r\n smute: float\r\n Stretch-mute value (default 0)\r\n\r\n Returns\r\n -------\r\n Three plots:\r\n (1) log\r\n (2) CMP before\r\n (3) CMP after\r\n NMOedCMP: np.ndarray\r\n NMO-ed CMP gather\r\n\r\n Translated to Python from Matlab by Musab al Hasani and Nicolas Vinard, 2019\r\n\r\n \"\"\"\r\n # Convert time to ms\r\n t = 1000*t\r\n dt = H['dt']*1e-3 # Convert H['dt'] to ms\r\n nt = H['ns'] # Number of time samples\r\n\r\n # append zero to first time vector if not already 0\r\n if t[0] > 0:\r\n t = np.append(0.0, t)\r\n v = np.append(v[0], v)\r\n\r\n # End of t should be the total time\r\n if t[len(t)-1] < 
dt*nt:\r\n t = np.append(t, dt*nt)\r\n v = np.append(v, v[len(v)-1])\r\n\r\n # Plot time-velocity log\r\n t_plot = np.arange(0,dt*nt, dt)\r\n v2 = np.interp(t_plot, t, v)\r\n\r\n c = v2\r\n dt = dt/1000\r\n\r\n nx = np.min(CMPgather.shape)\r\n\r\n NMOedCMP = np.zeros((nt, nx))\r\n\r\n if smute == 0:\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n\r\n for it in range(0, nt):\r\n\r\n off2c2 = (off*off)/(c[it]*c[it])\r\n t0 = it * dt\r\n t2 = t0*t0 + off2c2\r\n tnmo = np.sqrt(t2) - t0\r\n itnmo1 = int(np.floor(tnmo/dt))\r\n difft = (tnmo-dt*itnmo1)/dt\r\n\r\n if (it+itnmo1) < nt:\r\n NMOedCMP[it,ix] = (1.-difft)*CMPgather[it+itnmo1,ix] + difft*CMPgather[it+itnmo1,ix]\r\n\r\n if it+itnmo1 == nt:\r\n NMOedCMP[it, ix] = CMPgather[it+itnmo1-1, ix]\r\n\r\n else:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n\r\n for it in range(0, nt):\r\n\r\n off2c2 = (off*off)/(c[it]*c[it])\r\n t0 = it * dt\r\n t02 = t0*t0\r\n t2 = t02*off2c2\r\n tnmo = np.sqrt(t2)-t0\r\n\r\n if it==1:\r\n dtnmo = 1000.\r\n else:\r\n dtnmo = np.abs(np.sqrt(1+off2c2/t02)) - 1.\r\n\r\n itnmo1 = int(np.floor(tnmo/dt))\r\n difft = (tnmo-dt*itnmo1)/dt\r\n\r\n if (it+itnmo1) < nt:\r\n if dtnmo >= smute:\r\n NMOedCMP[it,ix] = 0.\r\n else:\r\n NMOedCMP[it, ix] = (1.-difft)*CMPgather[it+itnmo,ix] + difft*CMPgather[it+itnmo1,ix]\r\n if it+itnmo1 == nt:\r\n NMOedCMP[it, ix] = CMPgather[it+itnmo1-1,ix]\r\n\r\n return NMOedCMP\r\n\r\n\r\ndef semblance(\r\n cmp_sorted_data: np.ndarray,\r\n cmp_sorted_header: np.ndarray,\r\n H,\r\n cmp_positions: list,\r\n vmin=1500,vmax=4000, vstep=25\r\n )->list:\r\n\r\n \"\"\"\r\n tvpicks = semblance(cmp_sorted_data, cmp_sorted_header, H, cmp_positions, vmin=1500, vmax=4000, vstep=25)\r\n\r\n Example: cmp_positions = [800, 1200, 2000, 3000]\r\n\r\n This function calls semblance for the given list of cmp positions\r\n\r\n Parameters\r\n ----------\r\n cmp_sorted_data gather: np.ndarray\r\n cmp sorted data\r\n cmp_sorted_header: np.ndarray\r\n cmp sorted header\r\n H:\r\n Seismic data header\r\n cmp_positions: list,\r\n list of cmp positions (see Example above)\r\n\r\n Optinal parameters\r\n ------------------\r\n vmin: float\r\n minimum velocity\r\n vmax: float\r\n maximum velocity\r\n vstep: float\r\n velocity increment\r\n\r\n Returns\r\n -------\r\n tvpicks: list\r\n list of travel-time picks required for generatevmodel2\r\n\r\n Written by Musab al Hasani and Nicolas Vinard, 2020\r\n\r\n \"\"\"\r\n\r\n tpicks_list = []\r\n vpicks_list = []\r\n\r\n for i, cmp_position in enumerate(cmp_positions):\r\n cmp, H_cmp = selectCMP(cmp_sorted_data, cmp_sorted_header, cmp_position)\r\n vpicks, tpicks = semblanceWiggle(cmp, H_cmp, H, vmin=vmin, vmax=vmax, vstep=vstep)\r\n tpicks_list.append(tpicks)\r\n vpicks_list.append(vpicks)\r\n\r\n tvpicks = list([tpicks_list, vpicks_list])\r\n\r\n return tvpicks\r\n\r\n\r\ndef nmo_v(\r\n cmp_gather: np.ndarray,\r\n H_CMPgather: np.ndarray,\r\n H,\r\n c: float,\r\n smute=0\r\n )->np.ndarray:\r\n\r\n \"\"\"\r\n\r\n NMOedCMP = nmo_v(cmp_gather, H_CMPgather, H, c, smute=0)\r\n\r\n This function applies NMO to a single CMP-gather, with linear interpolation,\r\n accroding to a constant velocity c\r\n\r\n Parameters\r\n ---------\r\n cmp_gather: np.ndarray\r\n CMP-gather\r\n H_CMPgather: np.ndarray\r\n Header of CMP-gather\r\n H:\r\n Data header\r\n c: float\r\n Constant velocity in m/s\r\n\r\n Optional parameters\r\n -------------------\r\n smute: float\r\n Stretch-mute value (default 0) means no stretch muting\r\n\r\n Returns\r\n -------\r\n 
NMOedCMP: np.ndarray\r\n NMO-ed CMP gather\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n Todo: Change stretch mute default to None or something\r\n\r\n \"\"\"\r\n\r\n nt, nx = cmp_gather.shape\r\n dt = H['dt'] / 1000000.\r\n cmp_new = np.zeros((cmp_gather.shape))\r\n\r\n if smute == 0:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4, ix]\r\n off2c2 = (off * off)/(c * c)\r\n\r\n for it in range(0, nt):\r\n\r\n t0 = it * dt\r\n t2 = t0 * t0 + off2c2\r\n tnmo = np.sqrt(t2) - t0\r\n itnmo1 = int(np.floor(tnmo / dt))\r\n difft = (tnmo-dt * itnmo1) / dt\r\n\r\n if it + itnmo1 + 1 < nt:\r\n cmp_new[it, ix] = (1. - difft) * cmp_gather[it + itnmo1, ix] + difft * cmp_gather[it + itnmo1 + 1,ix]\r\n if it+itnmo1 == nt-1:\r\n cmp_new[it,ix] = cmp_gather[it+itnmo1,ix]\r\n\r\n else:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n off2c2 = (off * off)/(c * c)\r\n\r\n for it in range(0, nt):\r\n t0 = it * dt\r\n t02 = t0 * t0\r\n t2 = t02 + off2c2\r\n tnmo = np.sqrt(t2) - t0\r\n\r\n if it == 0:\r\n dtnmo = 1000000.;\r\n else:\r\n dtnmo = np.abs(np.sqrt(1 + off2c2/t02)) - 1.\r\n\r\n itnmo1 = int(np.floor(tnmo / dt))\r\n difft = (tnmo - dt * itnmo1) / dt\r\n\r\n if it + itnmo1 + 1 < nt:\r\n if dtnmo > smute:\r\n cmp_new[it, ix] = 0.0\r\n else:\r\n cmp_new[it, ix] = (1.-difft) * cmp_gather[it + itnmo1, ix] + difft*cmp_gather[it + itnmo1 + 1, ix]\r\n\r\n if it + itnmo1 == nt - 1:\r\n cmp_new[it, ix] = cmp_gather[it + itnmo1, ix]\r\n\r\n return cmp_new\r\n\r\ndef nmo_vlog(\r\n CMPgather: np.ndarray,\r\n H_CMPgather: np.ndarray,\r\n H: dict,\r\n t: np.ndarray,\r\n v: np.ndarray,\r\n smute=0\r\n )->tuple([np.ndarray, np.ndarray]):\r\n\r\n \"\"\"\r\n NMOedCMP = nmo_vlog(CMPgather, H_CMPgather, H, t, v, smute=0)\r\n\r\n This function applied NMO to a single CMP-gather given a 1D velocity-time log\r\n Addtionally it outputs three plots showing the log, the CMP-gather before and after NMO\r\n\r\n Parameters\r\n ----------\r\n CMP gather: np.ndarray\r\n CMP gather\r\n H_CMPgather: np.ndarray\r\n Header of CMP gather\r\n H: dict\r\n Seismid data header\r\n t: np.ndarray of shape (#picks,)\r\n Time of velocity picks in seconds\r\n v: np.ndarray of shape (#picks,)\r\n Velocity picks in m/s\r\n\r\n Optional parameters\r\n --------------------\r\n smute:float\r\n stretch-mute value (default 0) meaning no mute\r\n\r\n Returns\r\n -------\r\n Three plots: log, CMO before and after\r\n NMOedCMP: np.ndarray\r\n NMO-ed CMP gather\r\n v_interp: np.ndarray\r\n Interpolated velocities (same length as time vector)\r\n\r\n Translated to Python from Matlab by Musab al Hasani and Nicolas Vinard, 2019\r\n\r\n \"\"\"\r\n # Convert time to ms\r\n t = 1000*t\r\n dt = H['dt']*1e-3\r\n nt = H['ns'] # Number of time samples\r\n\r\n # append zero to first time vector if not already 0\r\n if t[0] > 0:\r\n t = np.append(0.0, t)\r\n v = np.append(v[0], v)\r\n\r\n # End of t should be the total time\r\n if t[len(t)-1] < dt*nt:\r\n t = np.append(t, dt*nt)\r\n v = np.append(v, v[len(v)-1])\r\n\r\n # Plot time-velocity log\r\n t_plot = np.arange(0,dt*nt, dt)\r\n v2 = np.interp(t_plot, t, v)\r\n\r\n fig = plt.figure(figsize=(12,5))\r\n plt.subplot(1,3,2)\r\n plt.plot(v2, t_plot)\r\n plt.scatter(v,t, color='red')\r\n plt.gca().invert_yaxis()\r\n plt.ylabel('two-way traveltime [ms]')\r\n plt.xlabel('velocity [m/s]')\r\n plt.title(\"time-velocity picks\")\r\n\r\n c = v2\r\n dt = dt*1e-03 # Now time is needed in s\r\n nx = np.min(CMPgather.shape)\r\n\r\n NMOedCMP = 
np.zeros((nt, nx))\r\n\r\n if smute == 0:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n\r\n for it in range(0, nt):\r\n\r\n off2c2 = (off*off)/(c[it]*c[it])\r\n t0 = it * dt\r\n t2 = t0*t0 + off2c2\r\n tnmo = np.sqrt(t2) - t0\r\n itnmo1 = int(np.floor(tnmo/dt))\r\n difft = (tnmo-dt*itnmo1)/dt\r\n\r\n if (it+itnmo1) < nt:\r\n NMOedCMP[it,ix] = (1.-difft)*CMPgather[it+itnmo1-1,ix] + difft*CMPgather[it+itnmo1,ix]\r\n if it+itnmo1 == nt:\r\n NMOedCMP[it, ix] = CMPgather[it+itnmo1-1, ix]\r\n\r\n elif smute!=0:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n\r\n for it in range(0, nt):\r\n\r\n off2c2 = (off*off)/(c[it]*c[it])\r\n t0 = it * dt\r\n t02 = t0*t0\r\n t2 = t02 + off2c2\r\n tnmo = np.sqrt(t2)-t0\r\n\r\n if it==0:\r\n dtnmo = 1000.\r\n else:\r\n dtnmo = np.abs(np.sqrt(1+off2c2/t02)) - 1.\r\n\r\n itnmo1 = int(np.floor(tnmo/dt))\r\n difft = (tnmo-dt*itnmo1)/dt\r\n\r\n if (it+itnmo1) < nt:\r\n if dtnmo >= smute:\r\n NMOedCMP[it,ix] = 0.\r\n else:\r\n NMOedCMP[it, ix] = (1.-difft)*CMPgather[it+itnmo1-1,ix] + difft*CMPgather[it+itnmo1,ix]\r\n if it+itnmo1 == nt:\r\n NMOedCMP[it, ix] = CMPgather[it+itnmo1-1,ix]\r\n\r\n plt.subplot(1,3,1)\r\n wiggle(CMPgather)\r\n plt.title('Original CMP gather')\r\n plt.subplot(1,3,3)\r\n wiggle(NMOedCMP)\r\n plt.title('NMO-ed CMP gather')\r\n fig.tight_layout(pad=1.0)\r\n\r\n return NMOedCMP\r\n\r\n\r\ndef nmo_stack(\r\n cmpsorted_data: np.ndarray,\r\n cmpsorted_hdr: np.ndarray,\r\n midpoints: np.ndarray,\r\n folds: np.ndarray,\r\n H: dict,\r\n vmodel: np.ndarray,\r\n smute=0\r\n )->np.ndarray:\r\n '''\r\n zosection = nmo_stack(cmpsorted_data, cmpsorted_hdr, midpoints, folds, H, vmodel, smute=None)\r\n\r\n This function generates a stacked zero-offset section from a CMP-sorted\r\n dataset. First, NMO correction is performed on each CMP-gather, using the\r\n velocity model of the subsurface. Subsequently, each NMO'ed CMP-gather is\r\n stacked to a obtain zero-offset traces on the distances corresponding to\r\n the midpoints of each CMP-gather.\r\n\r\n Parameters\r\n ----------\r\n cmpsorted_data:np.ndarray\r\n CMP-sorted dataset\r\n cmpsorted_hdr: np.ndarray\r\n Its headers\r\n midpoints: np.ndarray\r\n CMP-gather positions (see ANALYSEFOLD)\r\n folds: np.ndarray\r\n CMP-gather folds (see ANALYSEFOLD)\r\n H: dict\r\n Header of the seismic data\r\n vmodel: np.ndarray\r\n Velocity model matrix\r\n\r\n Optional parameters\r\n -------------------\r\n smute: float\r\n Stretch-mute factor (default is 0 which equals no mute)\r\n\r\n Returns\r\n -------\r\n zosection: np.ndarray\r\n Zero-offset stacked seismic section\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n '''\r\n # Read the amount of time-samples and traces from the size of the data matrix\r\n nt,ntr=cmpsorted_data.shape\r\n\r\n # Amount of cmp gathers equals the length of the midpoint-array\r\n cmpnr = len(midpoints)\r\n\r\n # Initialise tracenr in cmpsorted dataset\r\n tracenr = 0\r\n\r\n # Initialize zosection\r\n zosection = np.zeros((nt, cmpnr))\r\n\r\n print('Processing CMPs. 
This may take some time...')\r\n print(' ')\r\n\r\n # Update message every tenth percent\r\n printcounter = 0\r\n tenPerc = int(cmpnr/10)\r\n percStatus = 0\r\n\r\n for l in range(0, cmpnr):\r\n\r\n # CMP midpoint in [m] (just for display), and associated fold\r\n midpoint = midpoints[l]\r\n fold = folds[l]\r\n\r\n # positioning in the cmpsorted dataset\r\n gather = cmpsorted_data[:, tracenr:(tracenr+fold)]\r\n gather_hdr = cmpsorted_hdr[:, tracenr:(tracenr+fold)]\r\n\r\n # NMO and stack the selected CMP-gather\r\n nmoed = nmo_vxt(gather, gather_hdr, H, vmodel[:,l], smute)\r\n zotrace = stack_cmp(nmoed)\r\n zosection[:,l] = zotrace[:,0]\r\n\r\n # go to traceposition of next CMP in cmpsorted dataset\r\n tracenr = tracenr + fold\r\n\r\n # Update message\r\n if printcounter == tenPerc:\r\n percStatus += 10\r\n print('Finished stacking {} traces out of {}. {}%'.format(l, cmpnr, percStatus))\r\n printcounter=0\r\n\r\n printcounter+=1\r\n\r\n print('Done')\r\n\r\n return zosection\r\n\r\n\r\n\r\n\r\n'''\r\ndef nmo_stack(\r\n cmpsorted_data: np.ndarray,\r\n cmpsorted_hdr: np.ndarray,\r\n midpoints: np.ndarray,\r\n folds: np.ndarray,\r\n H: dict,\r\n vmodel: np.ndarray,\r\n smute=0\r\n )->np.ndarray:\r\n\r\n \"\"\"\r\n zosection = nmo_stack(cmpsorted_data, cmpsorted_hdr, midpoints, folds, H, vmodel, smute=None)\r\n\r\n This function generates a stacked zero-offset section from a CMP-sorted\r\n dataset. First, NMO correction is performed on each CMP-gather, using the\r\n velocity model of the subsurface. Subsequently, each NMO'ed CMP-gather is\r\n stacked to a obtain zero-offset traces on the distances corresponding to\r\n the midpoints of each CMP-gather.\r\n\r\n Parameters\r\n ----------\r\n cmpsorted_data:np.ndarray\r\n CMP-sorted dataset\r\n cmpsorted_hdr: np.ndarray\r\n Its headers\r\n midpoints: np.ndarray\r\n CMP-gather positions (see ANALYSEFOLD)\r\n folds: np.ndarray\r\n CMP-gather folds (see ANALYSEFOLD)\r\n H: dict\r\n Header of the seismic data\r\n vmodel: np.ndarray\r\n Velocity model matrix\r\n\r\n Optional parameters\r\n -------------------\r\n smute: float\r\n Stretch-mute factor (default is 0 which equals no mute)\r\n\r\n Returns\r\n -------\r\n zosection: np.ndarray\r\n Zero-offset stacked seismic section\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n # Read the amount of time-samples and traces from the size of the datamatrix\r\n nt,ntr=cmpsorted_data.shape\r\n\r\n # Amount of cmp gathers equals the length of the midpoint-array\r\n cmpnr = len(midpoints)\r\n\r\n # Initialise tracenr in cmpsorted dataset\r\n trace_num = 1\r\n zosection = np.zeros((nt, cmpnr))\r\n\r\n for l in tnrange(cmpnr, desc='Processing CMPs'):\r\n # CMP midpoint in [m] (just for display), and associated fold\r\n midpoint = midpoints[l]\r\n fold = folds[l]\r\n\r\n # positioning in the cmpsorted dataset\r\n gather = cmpsorted_data[:, (trace_num-1):(trace_num+fold)]\r\n gather_hdr = cmpsorted_hdr[:, (trace_num-1):(trace_num+fold)]\r\n\r\n # NMO and stack the selected CMP-gather\r\n nmoed = nmo_vxt(gather, gather_hdr, H, vmodel[:,l], smute)\r\n zotrace = stack_cmp(nmoed)\r\n zosection[:,l] = zotrace[:,0]\r\n\r\n # go to traceposition of next CMP in cmpsorted dataset\r\n trace_num = trace_num + fold\r\n\r\n return zosection\r\n'''\r\n\r\n\r\ndef stackplot(gather: np.ndarray, H: dict)->np.ndarray:\r\n\r\n \"\"\"\r\n stack = stackplot(gather, H)\r\n\r\n This function stacks the traces in a gather and makes a plot of the stacked trace\r\n\r\n Parameters\r\n 
----------\r\n gather: np.ndarray\r\n The gather to stack, usually an NMO'ed CMP gather\r\n H: dict\r\n Seimic data header\r\n\r\n Returns\r\n -------\r\n stack: np.ndarray\r\n Stacked trace\r\n\r\n Translated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n gathersize = gather.shape[1]\r\n stack = np.sum(gather,1)/gathersize\r\n t = np.arange(0, H['ns']*H['dt']/1000000, H['dt']/1000000)\r\n d = 0.0\r\n\r\n stack = stack.reshape(len(stack), 1)\r\n gather2 = np.append(gather, stack, axis=1)\r\n\r\n fig = plt.figure(figsize=(12,5))\r\n plt.subplot(131)\r\n wiggle(gather,t=t)\r\n plt.title(\"Input gather\")\r\n plt.subplot(132)\r\n wiggle(gather2, t=t)\r\n plt.title(\"Input gather including stacked trace\")\r\n plt.subplot(133)\r\n plt.plot(stack, t, color='green')\r\n plt.gca().fill_betweenx(t,d,stack[:,0], where=(stack[:,0]>d), color='green')\r\n plt.gca().invert_yaxis()\r\n plt.ylabel(\"time [s]\")\r\n plt.xlabel(\"amplitude\")\r\n plt.title(\"stacked trace\")\r\n fig.tight_layout()\r\n\r\n return stack\r\n\r\n\r\n\r\ndef nmo_vxt(\r\n CMPgather: np.ndarray,\r\n H_CMPgather: np.ndarray,\r\n H: dict,\r\n c: float,\r\n smute=0\r\n )-> np.ndarray:\r\n\r\n \"\"\"\r\n\r\n Parameters\r\n ----------\r\n CMPgather: np.ndarray\r\n CMP gather\r\n H_CMPgather: np.ndarray\r\n Its header\r\n H: dict\r\n Seismic data header\r\n c: float\r\n Velocity in m/s\r\n smute: float\r\n Stretch mute parameters, optional\r\n\r\n Returns\r\n -------\r\n NMOedCMP: np.ndarray\r\n NMO-ed CMP gather\r\n \"\"\"\r\n\r\n dt = H['dt']*1e-6 # time in seconds\r\n nt = H['ns'] # Number of time samples\r\n nx = np.min(CMPgather.shape)\r\n\r\n NMOedCMP = np.zeros((nt, nx))\r\n\r\n if smute == 0:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n\r\n for it in range(0, nt):\r\n\r\n off2c2 = (off*off)/(c[it]*c[it])\r\n t0 = it * dt\r\n t2 = t0*t0 + off2c2\r\n tnmo = np.sqrt(t2) - t0\r\n itnmo1 = int(np.floor(tnmo/dt))\r\n difft = (tnmo-dt*itnmo1)/dt\r\n\r\n if (it+itnmo1) < nt:\r\n NMOedCMP[it,ix] = (1.-difft)*CMPgather[it+itnmo1,ix] + difft*CMPgather[it+itnmo1,ix]\r\n\r\n if it+itnmo1 == nt:\r\n NMOedCMP[it, ix] = CMPgather[it+itnmo1-1, ix]\r\n\r\n else:\r\n\r\n for ix in range(0, nx):\r\n\r\n off = H_CMPgather[4,ix]\r\n\r\n for it in range(0, nt):\r\n\r\n off2c2 = (off*off)/(c[it]*c[it])\r\n t0 = it * dt\r\n t02 = t0*t0\r\n t2 = t02 + off2c2\r\n tnmo = np.sqrt(t2)-t0\r\n\r\n if it == 0:\r\n dtnmo = 1000.\r\n else:\r\n dtnmo = np.abs(np.sqrt(1+off2c2/t02)) - 1.\r\n\r\n itnmo1 = int(np.floor(tnmo/dt))\r\n difft = (tnmo-dt*itnmo1)/dt\r\n\r\n if (it+itnmo1) < nt:\r\n if dtnmo >= smute:\r\n NMOedCMP[it,ix] = 0.\r\n else:\r\n NMOedCMP[it, ix] = (1.-difft)*CMPgather[it+itnmo1-1,ix] + difft*CMPgather[it+itnmo1,ix]\r\n\r\n if it+itnmo1 == nt:\r\n NMOedCMP[it, ix] = CMPgather[it+itnmo1-1,ix]\r\n\r\n return NMOedCMP\r\n\r\ndef stack_cmp(gather: np.ndarray)->np.ndarray:\r\n\r\n \"\"\"\r\n\r\n stacked_trace = stack_cmp(gather)\r\n\r\n This function stacks one NMO-ed CMP-gather. Output is one stacked trace\r\n for the midpoint position belonging to the CMP-gather. 
Is used by the\r\n function NMO_STACK, use STACKPLOT instead.\r\n\r\n Translated to Python from Matlab by Musab al Hasani and Nicolas Vinard, 2019\r\n\r\n \"\"\"\r\n\r\n cmpsize = np.min(gather.shape)\r\n\r\n if cmpsize > 1:\r\n stacked_trace = np.sum(gather,axis=1)\r\n else:\r\n stacked_trace = gather\r\n\r\n stacked_trace=np.reshape(stacked_trace,(len(stacked_trace),1))\r\n\r\n return stacked_trace\r\n\r\ndef generatevmodel2(\r\n cmppicks: list([np.ndarray]),\r\n tvpicks: list([[np.ndarray],[np.ndarray]]),\r\n midpnts: np.ndarray,\r\n H: dict\r\n )->np.ndarray:\r\n\r\n \"\"\"\r\n\r\n vmodel = generatevmodel2(cmppicks, tvpicks, midpnts, H)\r\n\r\n This function generates 2-D velocity model given cmp location, traveltimes,\r\n Header and midpoints. Linear interpolation between inputs.\r\n\r\n Parameters\r\n ----------\r\n cmppicks: list([np.ndarray])\r\n Picked common midpoints, list([cmp positions])\r\n tvpicks: list([[np.ndarray],[np.ndarray]])\r\n Picked traveltimes\r\n midpnts: np.ndarray\r\n Midpoint positons (returned by analysefold)\r\n H: dict\r\n Seismic data header\r\n\r\n Returns\r\n -------\r\n vmodel: np.ndarray\r\n 2-D velocity model\r\n\r\n Written by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n tvLog = tvLogs(cmppicks, tvpicks, H)\r\n t = tvLog[:,:,0]*1000\r\n v = tvLog[:,:,1]\r\n cmppicks = np.array(cmppicks)\r\n\r\n # Calculating CMP sequence numbers [] from midpoint positions [m]\r\n #Note that midpnts is an input argument from generatevmodel, the function calling this script\r\n cmpdist = midpnts[1]-midpnts[0]\r\n cmp_initoffset = midpnts[0]/cmpdist;\r\n vcmp = []\r\n\r\n for a in range(0, len(cmppicks)):\r\n vcmp.append(cmppicks[a]/cmpdist - (cmp_initoffset-1))\r\n\r\n # Preparing the velocity and time matrices for generatevmodel\r\n nrows, ncols = t.shape\r\n t_max=H['dt']/1000*(H['ns']-1) # in ms\r\n\r\n t_up = t\r\n v_up = v\r\n\r\n # Loop over rows, the amount of CMPS\r\n for k in range(0, len(cmppicks)):\r\n\r\n if t[k,0] > 0:\r\n t_up[k,:] = np.insert(t_up, 0, 0)\r\n v_up[k,:] = np.insert(v_up, 0, v[k,0])\r\n else:\r\n if k == 0:\r\n t_up = np.insert(t_up, 1, H['dt']/1000,axis=1)\r\n v_up = np.insert(v_up, 1, v[k,1], axis=1)\r\n else:\r\n t_up[k,0] = t[k,0]\r\n v_up[k,:1] = v[k,:1]\r\n\r\n t = t_up.astype(dtype='int32')*int(1000)\r\n v = v_up.astype(dtype='int32')\r\n\r\n # count CMP midpoints\r\n m=len(midpnts)\r\n\r\n # initialise the vmodel, the columns contain the velocities for each CMP midpnt\r\n vxt = np.zeros((H['ns'],m))\r\n vmodel = np.zeros((H['ns'],m))\r\n vcmp_extended = np.insert(vcmp, 0, 0)\r\n vcmp_extended = np.append(vcmp_extended, m-1)\r\n\r\n v_extended = v\r\n t_extended = t\r\n v_extended = np.append(v_extended, [v[v.shape[0]-1,:]], axis=0)\r\n t_extended = np.append(t_extended, [t[t.shape[0]-1,:]], axis=0)\r\n v_extended = np.insert(v_extended, 0, v[0,:], axis=0)\r\n t_extended = np.insert(t_extended, 0, t[0,:], axis=0)\r\n\r\n vold = np.zeros((H['ns'],np.size(vcmp_extended)))\r\n\r\n for r in range(0, H['ns']):\r\n\r\n for j, k in enumerate(vcmp_extended):\r\n\r\n vxt[:,int(k)] = vlog_exd(v_extended[j,:],t_extended[j,:],H)\r\n vold[r,j]=vxt[r,int(k)]\r\n\r\n # horizontal interpolation between the picked CMP positions\r\n x = vcmp_extended\r\n x2 = np.arange(0,m)\r\n v2 = np.interp(x2, x, vold[r,:])\r\n vmodel[r,:] = v2\r\n\r\n # Plot velocity model\r\n plt.figure()\r\n plt.pcolormesh(midpnts, np.arange(0,H['dt']/1000*H['ns'],H['dt']/1000), vmodel)\r\n plt.xlabel('CMP position [m]')\r\n plt.ylabel('two-way time [ms]')\r\n 
plt.title('velocity model')\r\n plt.gca().invert_yaxis()\r\n plt.colorbar();\r\n\r\n return vmodel\r\n\r\n\r\n### Helper functions\r\ndef tvLogs(\r\n cmppicks: list([np.ndarray]),\r\n tvpicks: list([[np.ndarray], [np.ndarray]]),\r\n H: dict\r\n )->np.ndarray:\r\n\r\n \"\"\"\r\n tvLog = tvLogs(cmppicks, tvpicks, H)\r\n\r\n This function interpolates the picks along the time dimension\r\n\r\n Written by Nicolas Vinard, 2019\r\n\r\n \"\"\"\r\n\r\n cmppicks=np.array(cmppicks)\r\n time = np.arange(0, H['ns'])*H['dt']*1e-06\r\n tvLog = np.zeros((cmppicks.shape[0], time.shape[0], 2))\r\n tvLog[:,:,0] = time\r\n\r\n for i in range(len(tvpicks[0])):\r\n vInterp = np.interp(time, np.array(tvpicks[0][i]), np.array(tvpicks[1][i]))\r\n tvLog[i,:,1] = vInterp\r\n\r\n return tvLog\r\n\r\n\r\ndef vlog_exd(v:np.ndarray,t:np.ndarray,H:dict):\r\n \"\"\"\r\n Interpolation function used in genereatevmodel2\r\n \"\"\"\r\n\r\n dt=H['dt']\r\n nt=H['ns']\r\n t2 = np.arange(0,nt*dt,dt)\r\n\r\n return np.interp(t2,t,v)\r\n\r\ndef vel_zeroOffset(xs, x1, x2, t1, t2):\r\n\r\n \"\"\"\r\n Compute velocity estimate of zero offset hyperbola\r\n \"\"\"\r\n\r\n velz0 = 2./np.sqrt( np.abs(t2**2 - t1**2) ) * np.sqrt( np.abs( (x2-xs)**2 - (x1-xs)**2 ) )\r\n\r\n return velz0\r\n\r\n\r\n# kirk_mig with fancy update toolbar. uncomment if you want to use it and then comment the other kirk_mig function\r\n\r\n'''\r\ndef kirk_mig(dataIn, vModel, t, x):\r\n\r\n \"\"\"\r\n dataMig, tmig, xmig = kirk_mig(dataIn, vModel, t, x)\r\n\r\n This functions performs Kirchhoff time migration.\r\n\r\n Parameters\r\n ----------\r\n dataIn: np.ndarray\r\n Zero offset data. One trace per column.\r\n vModel: float, np.ndarray (1D), np.ndarray (2D)\r\n Velocity model. Can be in three formats:\r\n 1) float --> constant velocity migration\r\n 2) 1-D np.ndarray --> must have same dimension as the number rows in dataIn.\r\n In this case it is assumed to be an rms velocity function (of time)\r\n which is applied at all positions along the section.\r\n 3) 2-D array --> must have same shape as dataIn. Here it is assumed\r\n to be the rms velocity for each sample location.\r\n\r\n t: float or np.ndarray\r\n Time information. Two possibilies:\r\n (1) scalar: time sample rate in seconds\r\n (2) 1-D np.ndarray: time coordinates for the rows of dataIn.\r\n\r\n x: float or np.ndarray\r\n Spatial information. 
Two possibilities:\r\n (1) float: spatial sample rate (in units consistent with the velocity information.\r\n (2) 1-D np.ndarray: x-coordinates of the columns of dataIn\r\n\r\n Returns\r\n -------\r\n\r\n dataMig: np.ndarray\r\n The output migrated time section\r\n tmig: np.ndarray\r\n Time coordinates of migrated data\r\n xmig: np.ndarray\r\n Spatial coordinates of migrated data in x\r\n\r\n Tranlated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n \"\"\"\r\n\r\n if np.size(vModel) == 1:\r\n vModel = np.array(vModel)\r\n vModel.shape = (1,1)\r\n\r\n nsamp, ntr = dataIn.shape\r\n nvsamp, nvtr = vModel.shape\r\n\r\n dx = x[1]-x[0]\r\n dt = t[1]-t[0]\r\n\r\n # ---- test velocity info ----\r\n if(nvsamp==1 and nvtr!=1):\r\n # might be transposed vector\r\n if(nvtr==nsamp):\r\n vModel=vModel.T\r\n else:\r\n print('Velocity vector is wrong size')\r\n sys.exit()\r\n\r\n # make velocity matrix\r\n vModel=vModel*np.ones((1,ntr))\r\n\r\n elif( nvsamp==1 and nvtr==1):\r\n vModel=vModel*np.ones((nsamp,ntr))\r\n elif( nvsamp==nsamp and nvtr==1):\r\n vModel=vModel*np.ones((1,ntr))\r\n else:\r\n if(nvsamp!=nsamp):\r\n print('Velocity matrix has wrong number of rows')\r\n sys.exit()\r\n\r\n elif(ntr!=nvtr):\r\n print('Velocity matrix has wrong number of columns')\r\n sys.exit()\r\n\r\n # Now velocity matrix is of same size as data matrix\r\n aper = np.abs(np.max(x)-np.min(x))\r\n width1 = aper/20\r\n itaper1 = 1\r\n ang_limit = np.pi/3\r\n width2 = 0.15*ang_limit\r\n angle1 = ang_limit + width2\r\n itaper2 = 1\r\n interp_type = 1\r\n tmig1 = np.min(t)\r\n tmig2 = np.max(t)\r\n xmig1 = np.min(x)\r\n xmig2 = np.max(x)\r\n ibcfilter = 0\r\n\r\n # Aperature in traces and the taper coefficient\r\n traper0 = 0.5*aper/dx\r\n traper1 = width1/dx\r\n traper = np.asarray(np.round(traper0+traper1), dtype='int')\r\n coef1 = cos_taper(traper0,traper0+traper1)\r\n\r\n # one way time\r\n dt1 = 0.5*dt\r\n t1 = t/2.\r\n t2 = np.power(t1,2)\r\n\r\n # compute maximum time needed\r\n vmin = np.min(vModel)\r\n tmax = np.sqrt( 0.25*tmig2**2 + ((0.5*aper+width1)/vmin)**2)\r\n\r\n # pad input to tmaxin\r\n npad=np.ceil(tmax/dt1)-nsamp+5\r\n\r\n if npad > 0:\r\n npad2 = ((0, int(npad)), (0,0))\r\n dataIn = np.pad(dataIn, pad_width=npad2, mode='constant', constant_values=0)\r\n t1 = np.append(t1, np.arange(nsamp,nsamp+npad)*dt1)\r\n\r\n # output samples targeted ! HERE WE TAKE THE ENTIRE INPUT TIME\r\n t1.shape = (len(t1),1)\r\n tmig=t\r\n samptarget=np.arange(0, len(t))\r\n\r\n # output traces desired ! 
HERE WE TAKE ALL THE TRACES GIVEN OTHERWISE SEE ORIGINAL CREWES CODE AND ADAPT CODE\r\n trtarget = np.arange(0, len(x))\r\n xmig=x\r\n\r\n # initialize output array\r\n dataMig=np.zeros((len(samptarget), len(trtarget)))\r\n\r\n #loop over migrated traces\r\n kmig=0\r\n print(' ')\r\n print(' --- Total number of traces to be migrated : ' + np.str(len(xmig)) + ' --- ')\r\n print(' ')\r\n\r\n printcounter = 0\r\n tenPerc = int(len(trtarget)/10)\r\n percStatus = 0\r\n\r\n for ktr in tnrange(len(trtarget), desc='Migrating data'):\r\n\r\n ktr2 = trtarget[ktr]\r\n\r\n # determine traces in aperture\r\n n1=np.max((0, ktr-traper))\r\n n2=np.min((ntr, ktr+traper))\r\n truse = np.arange(n1,n2)\r\n\r\n # offsets and velocity\r\n offset2=np.power(((truse-ktr)*dx),2)\r\n v2 = np.power(vModel[:,ktr],2)\r\n\r\n for kaper in range(0, len(truse)):\r\n\r\n # offset times\r\n t_aper = np.sqrt(np.divide(offset2[kaper],v2[samptarget]) + t2[samptarget])\r\n\r\n # cosine theta amplitude correction\r\n if truse[kaper] == ktr:\r\n costheta = np.ones(samptarget.shape)\r\n tanalpha = np.zeros(samptarget.shape)\r\n else:\r\n costheta = 0.5*np.divide(tmig,t_aper)\r\n tanalpha = np.sqrt(1-np.power(costheta,2))\r\n\r\n # angle limit and the taper\r\n ind = np.where( costheta < np.cos(angle1))[0]\r\n i1 = ind[len(ind)-1]\r\n ind = np.where( costheta < np.cos(ang_limit))[0]\r\n i2 = ind[len(ind)-1]\r\n\r\n if i1 < i2:\r\n coef2 = cos_taper(i2,i1)\r\n costheta[0:i1+1] = np.zeros((i1+1))\r\n costheta[i1+1:i2+1] = np.multiply( np.flip(coef2,axis=0)[i2-i1:],costheta[i1+1:i2+1])\r\n\r\n tmp0 = dataIn[:,truse[kaper]]\r\n\r\n # Linear interpolation ONLY OPTION FOR NOW. CAN BE EXTENDED TO OTHER SCHEMES IF NECESSARY\r\n tnumber = t_aper/dt1\r\n it0 = np.array(np.floor( tnumber ), dtype='int')\r\n it1 = np.array(it0+1,dtype='int')\r\n xt0 = np.array(tnumber - it0+1, dtype='int')\r\n xt1 = np.array(it0-tnumber, dtype='int')\r\n tmp = np.multiply(xt1,tmp0[it0])+np.multiply(xt0, tmp0[it1])\r\n\r\n # aperture taper\r\n ccoef = 1.\r\n\r\n if np.abs(truse[kaper]-ktr)*dx > 0.5*aper:\r\n ccoef = coef1[int(np.round(np.abs(truse[kaper]-ktr)-traper0-1))]\r\n if np.abs(1-ccoef) > 0.05:\r\n tmp = np.multiply(tmp, ccoef)\r\n\r\n ind = np.where( costheta < 0.999)[0]\r\n costheta[ind] = np.sqrt(np.power(costheta[ind],3))\r\n tmp[ind] = np.multiply(tmp[ind], costheta[ind])\r\n dataMig[:,kmig] = dataMig[:,kmig]+tmp\r\n\r\n # scaling and 45 degree phase shift\r\n scalemig = np.multiply(vModel[samptarget,kmig],np.sqrt(np.multiply(np.pi,(tmig+0.0001))))\r\n dataMig[:,kmig] = np.divide(dataMig[:,kmig],scalemig)\r\n kmig+=1 # numerator\r\n\r\n # 45 degree phase shift\r\n dataMig = conv45(dataMig)\r\n\r\n return dataMig, tmig, xmig\r\n'''\r\ndef cos_taper(sp,ep,samp=1):\r\n\r\n dd=[]\r\n sp = sp+1\r\n ep = ep+1\r\n l = np.abs(ep-sp)/samp\r\n l = l+1\r\n\r\n if l <= 1:\r\n coef = np.asarray([1.0])\r\n if l > 1:\r\n coef = np.zeros(int(l))\r\n dd = 1.0/(l-1)*np.pi*0.5\r\n\r\n for i in range(0,int(l)):\r\n coef[i] = np.cos((i)*dd)\r\n\r\n return coef\r\n\r\ndef conv45(dataIn):\r\n\r\n itrans = 0\r\n nrow,nvol=dataIn.shape\r\n\r\n if nrow == 1:\r\n dataIn=dataIn.T\r\n nrow = nvol\r\n nvol = 1\r\n itrans = 1\r\n\r\n aryout=np.zeros(dataIn.shape)\r\n filt = np.array([-0.0010 -0.0030,-0.0066,-0.0085,-0.0060, -0.0083, -0.0107,\r\n -0.0164,-0.0103,-0.0194,-0.0221,-0.0705,0.0395,-0.2161,-0.3831,\r\n 0.5451,0.4775,-0.1570,0.0130,0.0321,-0.0129]).T\r\n\r\n for j in range(0,nvol):\r\n conv1=np.convolve(dataIn[:,j], filt)\r\n aryout[:,j]=conv1[15:nrow+15]\r\n\r\n if 
itrans is True:\r\n aryout = aryout.T\r\n\r\n return aryout\r\n\r\n\r\ndef kirk_mig(\r\n dataIn: np.ndarray,\r\n vModel,\r\n t,\r\n x\r\n )->tuple([np.ndarray, np.ndarray, np.ndarray]):\r\n\r\n \"\"\"\r\n dataMig, tmig, xmig = kirk_mig(dataIn, vModel, t, x)\r\n\r\n This functions performs Kirchhoff time migration.\r\n\r\n Parameters\r\n ----------\r\n dataIn: np.ndarray\r\n Zero offset data. One trace per column.\r\n vModel: float, np.ndarray (1D), np.ndarray (2D)\r\n Velocity model. Can be in three formats:\r\n 1) float --> constant velocity migration\r\n 2) 1-D np.ndarray --> must have same dimension as the number rows in dataIn.\r\n In this case it is assumed to be an rms velocity function (of time)\r\n which is applied at all positions along the section.\r\n 3) 2-D array --> must have same shape as dataIn. Here it is assumed\r\n to be the rms velocity for each sample location.\r\n\r\n t: float or np.ndarray\r\n Time information. Two possibilies:\r\n (1) scalar: time sample rate in seconds\r\n (2) 1-D np.ndarray: time coordinates for the rows of dataIn.\r\n\r\n x: float or np.ndarray\r\n Spatial information. Two possibilities:\r\n (1) float: spatial sample rate (in units consistent with the velocity information.\r\n (2) 1-D np.ndarray: x-coordinates of the columns of dataIn\r\n\r\n Returns\r\n -------\r\n\r\n dataMig: np.ndarray\r\n The output migrated time section\r\n tmig: np.ndarray\r\n Time coordinates of migrated data\r\n xmig: np.ndarray\r\n Spatial coordinates of migrated data in x\r\n\r\n Tranlated to Python from Matlab by Nicolas Vinard and Musab al Hasani, 2019\r\n\r\n \"\"\"\r\n\r\n if np.size(vModel) == 1:\r\n vModel = np.array(vModel)\r\n vModel.shape = (1,1)\r\n\r\n nsamp, ntr = dataIn.shape\r\n nvsamp, nvtr = vModel.shape\r\n\r\n dx = x[1]-x[0]\r\n dt = t[1]-t[0]\r\n\r\n # ---- test velocity info ----\r\n if(nvsamp==1 and nvtr!=1):\r\n # might be transposed vector\r\n if(nvtr==nsamp):\r\n vModel=vModel.T\r\n else:\r\n print('Velocity vector is wrong size')\r\n sys.exit()\r\n\r\n # make velocity matrix\r\n vModel=vModel*np.ones((1,ntr))\r\n\r\n elif( nvsamp==1 and nvtr==1):\r\n vModel=vModel*np.ones((nsamp,ntr))\r\n elif( nvsamp==nsamp and nvtr==1):\r\n vModel=vModel*np.ones((1,ntr))\r\n else:\r\n if(nvsamp!=nsamp):\r\n print('Velocity matrix has wrong number of rows')\r\n sys.exit()\r\n elif(ntr!=nvtr):\r\n print('Velocity matrix has wrong number of columns')\r\n sys.exit()\r\n\r\n # Now velocity matrix is of same size as data matrix\r\n aper = np.abs(np.max(x)-np.min(x))\r\n width1 = aper/20\r\n itaper1 = 1\r\n ang_limit = np.pi/3\r\n width2 = 0.15*ang_limit\r\n angle1 = ang_limit + width2\r\n itaper2 = 1\r\n interp_type = 1\r\n tmig1 = np.min(t)\r\n tmig2 = np.max(t)\r\n xmig1 = np.min(x)\r\n xmig2 = np.max(x)\r\n ibcfilter = 0\r\n\r\n # Aperature in traces and the taper coefficient\r\n traper0 = 0.5*aper/dx\r\n traper1 = width1/dx\r\n traper = np.asarray(np.round(traper0+traper1), dtype='int')\r\n coef1 = cos_taper(traper0,traper0+traper1)\r\n\r\n # one way time\r\n dt1 = 0.5*dt\r\n t1 = t/2.\r\n t2 = np.power(t1,2)\r\n\r\n # compute maximum time needed\r\n vmin = np.min(vModel)\r\n tmax = np.sqrt( 0.25*tmig2**2 + ((0.5*aper+width1)/vmin)**2)\r\n\r\n # pad input to tmaxin\r\n npad=np.ceil(tmax/dt1)-nsamp+5\r\n if npad > 0:\r\n npad2 = ((0, int(npad)), (0,0))\r\n dataIn = np.pad(dataIn, pad_width=npad2, mode='constant', constant_values=0)\r\n t1 = np.append(t1, np.arange(nsamp,nsamp+npad)*dt1)\r\n\r\n # output samples targeted ! 
HERE WE TAKE THE ENTIRE INPUT TIME\r\n t1.shape = (len(t1),1)\r\n tmig=t\r\n samptarget=np.arange(0, len(t))\r\n\r\n # output traces desired ! HERE WE TAKE ALL THE TRACES GIVEN OTHERWISE SEE ORIGINAL CREWES CODE AND ADAPT CODE\r\n trtarget = np.arange(0, len(x))\r\n xmig=x\r\n\r\n # initialize output array\r\n dataMig=np.zeros((len(samptarget), len(trtarget)))\r\n\r\n #loop over migrated traces\r\n kmig=0\r\n print(' ')\r\n print(' --- Total number of traces to be migrated : ' + np.str(len(xmig)) + ' --- ')\r\n print(' ')\r\n\r\n printcounter = 0\r\n tenPerc = int(len(trtarget)/10)\r\n percStatus = 0\r\n\r\n for ktr, ktr2 in enumerate(trtarget): # ktr - location of output trace\r\n\r\n # determine traces in aperture\r\n n1=np.max((0, ktr-traper))\r\n n2=np.min((ntr, ktr+traper))\r\n truse = np.arange(n1,n2)\r\n\r\n # offsets and velocity\r\n offset2=np.power(((truse-ktr)*dx),2)\r\n v2 = np.power(vModel[:,ktr],2)\r\n\r\n # loop over traces in aperture\r\n for kaper in range(0, len(truse)):\r\n\r\n # offset times\r\n t_aper = np.sqrt(np.divide(offset2[kaper],v2[samptarget]) + t2[samptarget])\r\n\r\n # cosine theta amplitude correction\r\n if truse[kaper] == ktr:\r\n costheta = np.ones(samptarget.shape)\r\n tanalpha = np.zeros(samptarget.shape)\r\n else:\r\n costheta = 0.5*np.divide(tmig,t_aper)\r\n tanalpha = np.sqrt(1-np.power(costheta,2))\r\n\r\n # angle limit and the taper\r\n ind = np.where( costheta < np.cos(angle1))[0]\r\n i1 = ind[len(ind)-1]\r\n ind = np.where( costheta < np.cos(ang_limit))[0]\r\n i2 = ind[len(ind)-1]\r\n\r\n if i1 < i2:\r\n coef2 = cos_taper(i2,i1)\r\n costheta[0:i1+1] = np.zeros((i1+1))\r\n costheta[i1+1:i2+1] = np.multiply( np.flip(coef2,axis=0)[i2-i1:],costheta[i1+1:i2+1])\r\n\r\n tmp0 = dataIn[:,truse[kaper]]\r\n\r\n # Linear interpolation ONLY OPTION FOR NOW. CAN BE EXTENDED TO OTHER SCHEMES IF NECESSARY\r\n tnumber = t_aper/dt1\r\n it0 = np.array(np.floor( tnumber ), dtype='int')\r\n it1 = np.array(it0+1,dtype='int')\r\n xt0 = np.array(tnumber - it0+1, dtype='int')\r\n xt1 = np.array(it0-tnumber, dtype='int')\r\n tmp = np.multiply(xt1,tmp0[it0])+np.multiply(xt0, tmp0[it1])\r\n\r\n # aperture taper\r\n ccoef = 1.\r\n if np.abs(truse[kaper]-ktr)*dx > 0.5*aper:\r\n ccoef = coef1[int(np.round(np.abs(truse[kaper]-ktr)-traper0-1))]\r\n if np.abs(1-ccoef) > 0.05:\r\n tmp = np.multiply(tmp, ccoef)\r\n\r\n ind = np.where( costheta < 0.999)[0]\r\n costheta[ind] = np.sqrt(np.power(costheta[ind],3))\r\n tmp[ind] = np.multiply(tmp[ind], costheta[ind])\r\n dataMig[:,kmig] = dataMig[:,kmig]+tmp\r\n\r\n # scaling and 45 degree phase shift\r\n scalemig = np.multiply(vModel[samptarget,kmig],np.sqrt(np.multiply(np.pi,(tmig+0.0001))))\r\n dataMig[:,kmig] = np.divide(dataMig[:,kmig],scalemig)\r\n kmig+=1 # numerator\r\n\r\n # Print progress information\r\n if printcounter == tenPerc:\r\n percStatus += 10\r\n print('Finished migrating {} traces out of {}. 
{}%'.format(ktr, len(trtarget), percStatus))\r\n printcounter=0\r\n\r\n printcounter+=1\r\n\r\n # 45 degree phase shift\r\n dataMig = conv45(dataMig)\r\n print('Done')\r\n\r\n return dataMig, tmig, xmig\r\n\r\n\r\ndef time2depth_trace(ttrace, vrmsmodel, tt):\r\n\r\n \"\"\"\r\n time2depth: Convert a single trace in the time domain to the depth domaing\r\n using a RMS-velocity model\r\n\r\n Usage:\r\n [ztrace,zz]=time2depth_trace(ttrace,vrmsmodel,tt)\r\n\r\n Output: ztrace - depth-converted trace\r\n zz - depth vector\r\n\r\n Input: ttrace - trace in time domain\r\n vmodel - 1D RMS-velocity model\r\n tt - time vector\r\n\r\n\r\n TIME2DEPTH_TRACE is a Matlab function originally written by Guy Drijkoningen\r\n and translated to Python by Musab Al Hasani.\r\n\r\n \"\"\"\r\n\r\n dt = tt[1] - tt[0]\r\n nt = len(tt)\r\n\r\n vintmodel = np.zeros(nt)\r\n vintmodel[0] = vrmsmodel[0]\r\n\r\n for it in range(1, nt):\r\n v2diff = tt[it]*vrmsmodel[it]**2 - tt[it-1]*vrmsmodel[it-1]**2\r\n vintmodel[it] = np.sqrt(v2diff/dt)\r\n\r\n # determine minumum velocity for minimum sampling in depth z\r\n vrmsmin = np.min(vrmsmodel)\r\n\r\n # take dz as smallest velocity times dt/2 (two-way time):\r\n dz = vrmsmin*dt/2\r\n\r\n # take maximum depth as maximum velocity times tmax/2 (two-way time):\r\n tmax = tt[-1]\r\n vrmsmax = np.max(vrmsmodel)\r\n zmax = vrmsmax*tmax/2\r\n\r\n nz = int(np.ceil(zmax/dz+1))\r\n zmax2 = nz*dz\r\n zz = np.arange(dz, zmax2, dz)\r\n\r\n # now we need to interpolate to regulaa np.range(dz, zmax) with dz step\r\n\r\n ztrace = np.zeros((nz))\r\n ztrace[0] = ttrace[0]\r\n itrun = 0\r\n z1 = 0.0\r\n z2 = zmax2\r\n\r\n for iz in range(1,int(nz)):\r\n\r\n ztrue = iz*dz\r\n\r\n # find out between which time samples are needed for interpolation:\r\n if itrun < nt:\r\n z2 = z1 + (vintmodel[itrun-1]*dt/2)\r\n while ztrue > z2 and itrun < nt:\r\n\r\n itrun = itrun +1\r\n z1 = z2\r\n z2 = z2 + vintmodel[itrun-1]*dt/2\r\n\r\n if itrun < nt:\r\n ztrace[iz] = (z2-ztrue)/(z2-z1)*ttrace[itrun-1] + (ztrue-z1)/(z2-z1)*ttrace[itrun]\r\n\r\n print('Done!')\r\n\r\n return ztrace, zz\r\n\r\n\r\n\r\n\r\n\r\ndef time2depth_section(tsection, vrmsmodel, tt):\r\n\r\n \"\"\"\r\n\r\n time2depth: Convert a time-migrated section to a depth\r\n section, using a RMS-velocity model\r\n\r\n Usage:\r\n [zmigsection,zz]=time2depth_SECTION(tmigsection,vrmsmodel,tt)\r\n\r\n Output: zsection - depth-converted time-migrated section\r\n zz - depth vector\r\n\r\n Input: tsection - time (possibly time-migrated) section\r\n vmodel - RMS-velocity model\r\n tt - time vector\r\n\r\n\r\n TIME2DEPTH_SECTION is a Matlab function originally written by Guy Drijkoningen\r\n and translated to Python by Musab Al Hasani.\r\n\r\n \"\"\"\r\n\r\n dt = tt[1] - tt[0]\r\n nt = len(tt)\r\n nx = tsection.shape[1]\r\n\r\n vintmodel = np.zeros((nt,nx))\r\n ix = 0\r\n vintmodel[0,ix] = vrmsmodel[0,ix]\r\n\r\n for it in range(1, nt):\r\n v2diff = tt[it]*vrmsmodel[it, ix]**2 - tt[it-1]*vrmsmodel[it-1, ix]**2\r\n vintmodel[it, ix] = np.sqrt(v2diff/dt)\r\n\r\n for ix in range(1,nx):\r\n vintmodel[0,ix] = vrmsmodel[0,ix]\r\n for it in range(1,nt):\r\n v2diff = tt[it]*vrmsmodel[it,ix]**2 - tt[it-1]*vrmsmodel[it-1,ix]**2\r\n vintmodel[it,ix] = np.sqrt(v2diff/dt)\r\n\r\n # determine minumum velocity for minimum sampling in depth z\r\n vrmsmin = np.min(vrmsmodel)\r\n\r\n # take dz as smallest velocity times dt/2 (two-way time):\r\n dz = vrmsmin*dt/2\r\n\r\n # take maximum depth as maximum velocity times tmax/2 (two-way time):\r\n tmax = tt[len(tt)-1]\r\n vrmsmax = 
np.max(vrmsmodel)\r\n zmax = vrmsmax*tmax/2\r\n nz = int(np.ceil(zmax/dz+1))\r\n zmax2 = nz*dz\r\n zz = np.arange(dz, zmax2+dz, dz)\r\n\r\n print(' ')\r\n print(' --- Total number of traces to be converted to depth: ' + np.str(nx) + ' --- ')\r\n print(' ')\r\n\r\n # now we need to interpolate to regulaa np.range(dz, zmax) with dz step\r\n zsection = np.zeros((nz, nx))\r\n\r\n printcounter = 0\r\n tenPerc = int(nx/10)\r\n percStatus = 0\r\n\r\n for ix in range(0,nx):\r\n\r\n zsection[0,ix] = tsection[0,ix]\r\n itrun = 0\r\n z1 = 0.0\r\n z2 = zmax2\r\n\r\n for iz in range(1,int(nz)):\r\n\r\n ztrue = iz*dz\r\n\r\n # find out between which time samples are needed for interpolation:\r\n if itrun < nt:\r\n z2 = z1 + (vintmodel[itrun-1, ix]*dt/2)\r\n while ztrue > z2 and itrun < nt:\r\n itrun = itrun +1\r\n z1 = z2\r\n z2 = z2 + vintmodel[itrun-1,ix]*dt/2\r\n\r\n if itrun < nt:\r\n zsection [iz, ix] = (z2-ztrue)/(z2-z1)*tsection[itrun-1, ix] + (ztrue-z1)/(z2-z1)*tsection[itrun, ix]\r\n\r\n if printcounter == tenPerc:\r\n percStatus += 10\r\n print('Finished depth converting {} traces out of {}. {}%'.format(ix, nx, percStatus))\r\n printcounter=0\r\n\r\n printcounter+=1\r\n\r\n print('Done!')\r\n\r\n return zsection, zz\r\n\r\n\r\ndef agc(DataO: np.ndarray, time: np.ndarray, agc_type = 'inst', time_gate = 500e-3):\r\n \"\"\"\r\n agc: applies automatic gain control for a given dataset.\r\n\r\n Usage:\r\n gained_data = agc(data,time,agc_type, time_gate)\r\n\r\n Parameters\r\n -----------\r\n data: np.ndarray\r\n Input seismic data\r\n time: np.ndarray\r\n Time array\r\n agc_type: string <class 'str'>\r\n Type of agc to be applied. Options: 1)'inst': instantanous AGC. 2) 'rms': root-mean-square.\r\n For details, please refere to: https://wiki.seg.org/wiki/Gain_applications\r\n time_gate: float <class 'float'>\r\n Time gate used for agc in sec. Defualt value 500e-3.\r\n\r\n Returns\r\n -------\r\n gained_data: np.ndarray\r\n Data after applying AGC\r\n\r\n AGC is python function written by Musab Al Hasani based on the book of Oz Yilmaz (https://wiki.seg.org/wiki/Gain_applications)\r\n\r\n \"\"\"\r\n data = np.copy(DataO)\r\n\r\n # # calculate nth-percentile\r\n # nth_percentile = np.abs(np.percentile(data, 99))\r\n\r\n # clip data to the value of nth-percentile\r\n # data = np.clip(data, a_min=-nth_percentile, a_max = nth_percentile)\r\n\r\n\r\n num_traces = data.shape[1] # number of traces to apply gain on\r\n gain_data = np.zeros(data.shape) # initialise the gained data 2D array\r\n\r\n # check what type of agc to use\r\n if agc_type == 'rms':\r\n for itrc in range(num_traces):\r\n gain_data[:, itrc] = rms_agc(data[:, itrc], time, time_gate)\r\n\r\n elif agc_type =='inst':\r\n for itrc in range(num_traces):\r\n gain_data[:, itrc] = inst_agc(data[:, itrc], time, time_gate)\r\n\r\n else:\r\n print('Wrong agc type!')\r\n\r\n return gain_data\r\n\r\n\r\n\r\ndef rms_agc(trace: np.ndarray, time: np.ndarray, time_gate=200e-3)-> np.ndarray:\r\n \"\"\"\r\n\r\n rms_agc: apply root-mean-square automatic gain control for a given trace.\r\n\r\n Usage:\r\n gained_trace = agc(data,time,agc_type, time_gate)\r\n\r\n Parameters\r\n -----------\r\n data: np.ndarray\r\n Input seismic trace\r\n time: np.ndarray\r\n Time array\r\n time_gate: float <class 'float'>\r\n Time gate used for agc in sec. 
Defualt value 200e-3 here, though there is not a typecal value to be used.\r\n\r\n Returns\r\n -------\r\n gained_trace: np.ndarray\r\n trace after applying RMS AGC\r\n\r\n RMS_AGC is python function written by Musab Al Hasani based on the book of Oz Yilmaz (https://wiki.seg.org/wiki/Gain_applications)\r\n\r\n \"\"\"\r\n\r\n # determine time sampling and num of samples\r\n dt = time[1]-time[0]\r\n N = len(trace)\r\n\r\n # determine number of time gates to use\r\n gates_num = int((time[-1]//time_gate)+1)\r\n\r\n # initialise indecies for the coners of the gate\r\n time_gate_1st_ind = 0\r\n time_gate_2nd_ind = int(time_gate/dt)\r\n\r\n\r\n # construct lists for begining and ends of tome gates\r\n start_gate_inds = [(time_gate_1st_ind + i*time_gate_2nd_ind) for i in range(gates_num)]\r\n end_gate_inds = [start_gate_inds[j] + time_gate_2nd_ind for j in range(gates_num)]\r\n\r\n # set last gate to the end sample\r\n end_gate_inds[-1] = N\r\n\r\n # initialise middle gate time and gain function arrays\r\n t_rms_values = np.zeros(gates_num+2)\r\n amp_rms_values = np.zeros(gates_num+2)\r\n\r\n # loop over every gate\r\n ivalue = 1\r\n for istart, iend in zip(start_gate_inds, end_gate_inds):\r\n t_rms_values[ivalue] = 0.5*(istart + iend)\r\n amp_rms_values[ivalue] = np.sqrt(np.mean(np.square(trace[istart:iend])))\r\n ivalue += 1\r\n\r\n # set side values for interpolation\r\n t_rms_values[-1] = N\r\n amp_rms_values[0] = amp_rms_values[1]\r\n amp_rms_values[-1] = amp_rms_values[-2]\r\n\r\n # linear interpolation for the rms amp function for every sample N\r\n rms_func = np.interp(range(N), t_rms_values, amp_rms_values )\r\n\r\n # calculate the gained trace\r\n gained_trace = trace*(np.sqrt(np.mean(np.square(trace)))/rms_func)\r\n\r\n\r\n return gained_trace\r\n\r\n\r\ndef inst_agc(trace, time, time_gate = 500e-3 ):\r\n \"\"\"\r\n\r\n rms_agc: apply instantanous automatic gain control for a given trace.\r\n\r\n Usage:\r\n gained_trace = agc(data,time,agc_type, time_gate)\r\n\r\n Parameters\r\n -----------\r\n data: np.ndarray\r\n Input seismic trace\r\n time: np.ndarray\r\n Time array\r\n time_gate: float <class 'float'>\r\n Time gate used for agc in sec. 
typecal values between 200-500ms.\r\n\r\n Returns\r\n -------\r\n gained_trace: np.ndarray\r\n trace after applying instansous AGC\r\n\r\n INST_AGC is python function written by Musab Al Hasani based on the book of Oz Yilmaz (https://wiki.seg.org/wiki/Gain_applications)\r\n\r\n \"\"\"\r\n # determine time sampling and num of samples\r\n dt = time[1]-time[0]\r\n N = len(trace)\r\n\r\n # determine the number of sample of a given gate\r\n end_samples = int(time_gate/dt)\r\n\r\n # calculate gates number not including the last end_samples\r\n gates_num = N - end_samples\r\n\r\n # initialise gates begining and end indices\r\n time_gate_1st_ind = 0\r\n time_gate_2nd_ind = int(time_gate/dt)\r\n\r\n # construct lists for indices of gates corners\r\n start_gate_inds = [i for i in range(gates_num)]\r\n end_gate_inds = [start_gate_inds[j] + time_gate_2nd_ind for j in range(gates_num)]\r\n\r\n #initialise gain function\r\n amp_inst_values = np.zeros(N)\r\n\r\n # loop over ever sample to calculate gain function\r\n ivalue = 0\r\n for istart, iend in zip(start_gate_inds, end_gate_inds):\r\n amp_inst_values[ivalue] = np.mean(np.abs(trace[istart:iend]))\r\n ivalue += 1\r\n amp_inst_values[-end_samples:] = (amp_inst_values[ivalue-1])\r\n\r\n # calculate gained trace\r\n gained_trace = trace*(np.sqrt(np.mean(np.square(trace)))/amp_inst_values)\r\n\r\n return gained_trace\r\n\r\ndef calculate_rms_1d(vector):\r\n \"\"\"\r\n calculate rms for a signal trace\r\n used in normalise function.\r\n \"\"\"\r\n sqrs = vector**2\r\n mean = np.mean(sqrs)\r\n return np.sqrt(mean)\r\n\r\ndef rms_2d(matrix):\r\n \"\"\"\r\n calculate rms for a 2D-matrix\r\n used in normalise function.\r\n \"\"\"\r\n \r\n nt, nx = matrix.shape\r\n \r\n rms_vals= np.zeros(nx)\r\n\r\n for i in range(nx):\r\n rms_vals[i] = calculate_rms_1d(matrix[:,i])\r\n \r\n return rms_vals\r\n\r\ndef normalise(input_data: np.ndarray)->np.ndarray:\r\n \"\"\"\r\n normalised_data = normalise(input_data)\r\n\r\n This function normalises each trace by its RMS. \r\n\r\n Parameters\r\n ----------\r\n Input_data: np.ndarray\r\n\r\n Returns\r\n -------\r\n normalised_data: np.ndarray\r\n \r\n\r\n Written to python by Musab Al Hasani, 2021\r\n\r\n \"\"\"\r\n \r\n original_amp = input_data\r\n norm = (1/rms_2d(original_amp))*original_amp\r\n \r\n return norm"
] |
[
[
"numpy.nanmax",
"numpy.sqrt",
"numpy.asarray",
"matplotlib.pyplot.axes",
"numpy.max",
"matplotlib.pyplot.plot",
"numpy.round",
"numpy.mean",
"numpy.where",
"numpy.divide",
"numpy.square",
"matplotlib.pyplot.gca",
"numpy.pad",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"numpy.less",
"scipy.io.loadmat",
"numpy.lexsort",
"numpy.subtract",
"numpy.ceil",
"numpy.copy",
"matplotlib.pyplot.subplot",
"numpy.size",
"numpy.insert",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots_adjust",
"numpy.interp",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.power",
"numpy.multiply",
"numpy.isnan",
"numpy.append",
"numpy.floor",
"matplotlib.pyplot.pcolormesh",
"numpy.argsort",
"numpy.array",
"numpy.str",
"numpy.sum",
"numpy.flip",
"matplotlib.pyplot.ylabel",
"numpy.convolve",
"numpy.abs",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"matplotlib.pyplot.draw",
"numpy.argwhere",
"matplotlib.pyplot.colorbar",
"matplotlib.widgets.Slider",
"numpy.cos",
"numpy.ones",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ginput",
"numpy.empty"
]
] |
sadrasabouri/vat_tf
|
[
"b7e36a6efb8f32217455ebcb9df258aaae0e53de"
] |
[
"train_semisup.py"
] |
[
"import time\n\nimport numpy\nimport tensorflow.compat.v1 as tf\n\nimport layers as L\nimport vat\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('device', '/gpu:0', \"device\")\n\ntf.app.flags.DEFINE_string('dataset', 'cifar10', \"{cifar10, svhn}\")\ntf.app.flags.DEFINE_string('logdir', './', \"Logging Directory\")\n\ntf.app.flags.DEFINE_integer('seed', 1, \"initial random seed\")\ntf.app.flags.DEFINE_bool('validation', False, \"\")\n\ntf.app.flags.DEFINE_integer('batch_size', 32, \"the number of examples in a batch\")\ntf.app.flags.DEFINE_integer('ul_batch_size', 128, \"the number of unlabeled examples in a batch\")\ntf.app.flags.DEFINE_integer('eval_batch_size', 100, \"the number of eval examples in a batch\")\ntf.app.flags.DEFINE_integer('eval_freq', 5, \"\")\ntf.app.flags.DEFINE_integer('num_epochs', 120, \"the number of epochs for training\")\ntf.app.flags.DEFINE_integer('epoch_decay_start', 80, \"epoch of starting learning rate decay\")\ntf.app.flags.DEFINE_integer('num_iter_per_epoch', 400, \"the number of updates per epoch\")\ntf.app.flags.DEFINE_float('learning_rate', 0.001, \"initial leanring rate\")\ntf.app.flags.DEFINE_float('mom1', 0.9, \"initial momentum rate\")\ntf.app.flags.DEFINE_float('mom2', 0.5, \"momentum rate after epoch_decay_start\")\n\ntf.app.flags.DEFINE_string('method', 'vat', \"{vat, vatent, baseline}\")\n\n\nif FLAGS.dataset == 'cifar10':\n from cifar10 import inputs, unlabeled_inputs\nelif FLAGS.dataset == 'svhn':\n from svhn import inputs, unlabeled_inputs \nelse: \n raise NotImplementedError\n\n\nNUM_EVAL_EXAMPLES = 5000\n\n\ndef build_training_graph(x, y, ul_x, lr, mom):\n global_step = tf.get_variable(\n name=\"global_step\",\n shape=[],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0),\n trainable=False,\n )\n logit = vat.forward(x)\n nll_loss = L.ce_loss(logit, y)\n with tf.variable_scope(tf.get_variable_scope(), reuse=True):\n if FLAGS.method == 'vat':\n ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)\n vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit)\n additional_loss = vat_loss\n elif FLAGS.method == 'vatent':\n ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)\n vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit)\n ent_loss = L.entropy_y_x(ul_logit)\n additional_loss = vat_loss + ent_loss\n elif FLAGS.method == 'baseline':\n additional_loss = 0\n else:\n raise NotImplementedError\n loss = nll_loss + additional_loss\n\n opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)\n tvars = tf.trainable_variables()\n grads_and_vars = opt.compute_gradients(loss, tvars)\n train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)\n return loss, train_op, global_step\n\n\ndef build_eval_graph(x, y, ul_x):\n losses = {}\n logit = vat.forward(x, is_training=False, update_batch_stats=False)\n nll_loss = L.ce_loss(logit, y)\n losses['NLL'] = nll_loss\n acc = L.accuracy(logit, y)\n losses['Acc'] = acc\n scope = tf.get_variable_scope()\n scope.reuse_variables()\n at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=False)\n losses['AT_loss'] = at_loss\n ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)\n vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit, is_training=False)\n losses['VAT_loss'] = vat_loss\n return losses\n\n\ndef main(_):\n print(FLAGS.epsilon, FLAGS.top_bn)\n numpy.random.seed(seed=FLAGS.seed)\n tf.set_random_seed(numpy.random.randint(1234))\n with tf.Graph().as_default() as g:\n with tf.device(\"/cpu:0\"):\n 
images, labels = inputs(batch_size=FLAGS.batch_size,\n train=True,\n validation=FLAGS.validation,\n shuffle=True)\n ul_images = unlabeled_inputs(batch_size=FLAGS.ul_batch_size,\n validation=FLAGS.validation,\n shuffle=True)\n\n images_eval_train, labels_eval_train = inputs(batch_size=FLAGS.eval_batch_size,\n train=True,\n validation=FLAGS.validation,\n shuffle=True)\n ul_images_eval_train = unlabeled_inputs(batch_size=FLAGS.eval_batch_size,\n validation=FLAGS.validation,\n shuffle=True)\n\n images_eval_test, labels_eval_test = inputs(batch_size=FLAGS.eval_batch_size,\n train=False,\n validation=FLAGS.validation,\n shuffle=True)\n\n with tf.device(FLAGS.device):\n lr = tf.placeholder(tf.float32, shape=[], name=\"learning_rate\")\n mom = tf.placeholder(tf.float32, shape=[], name=\"momentum\")\n with tf.variable_scope(\"CNN\") as scope:\n # Build training graph\n loss, train_op, global_step = build_training_graph(images, labels, ul_images, lr, mom)\n scope.reuse_variables()\n # Build eval graph\n losses_eval_train = build_eval_graph(images_eval_train, labels_eval_train, ul_images_eval_train)\n losses_eval_test = build_eval_graph(images_eval_test, labels_eval_test, images_eval_test)\n\n init_op = tf.global_variables_initializer()\n\n logdir = FLAGS.logdir\n writer_train = None\n writer_test = None\n\n saver = tf.train.Saver(tf.global_variables())\n sv = tf.train.Supervisor(\n is_chief=True,\n logdir=logdir,\n init_op=init_op,\n init_feed_dict={lr: FLAGS.learning_rate, mom: FLAGS.mom1},\n saver=saver,\n global_step=global_step,\n summary_op=None,\n summary_writer=None,\n save_model_secs=150, recovery_wait_secs=0)\n\n print(\"Training...\")\n with sv.managed_session() as sess:\n for ep in range(FLAGS.num_epochs):\n if sv.should_stop():\n break\n\n if ep < FLAGS.epoch_decay_start:\n feed_dict = {lr: FLAGS.learning_rate, mom: FLAGS.mom1}\n else:\n decayed_lr = ((FLAGS.num_epochs - ep) / float(\n FLAGS.num_epochs - FLAGS.epoch_decay_start)) * FLAGS.learning_rate\n feed_dict = {lr: decayed_lr, mom: FLAGS.mom2}\n\n sum_loss = 0\n start = time.time()\n for i in range(FLAGS.num_iter_per_epoch):\n _, batch_loss, _ = sess.run([train_op, loss, global_step],\n feed_dict=feed_dict)\n sum_loss += batch_loss\n end = time.time()\n print(\"Epoch:\", ep, \"CE_loss_train:\", sum_loss / FLAGS.num_iter_per_epoch, \"elapsed_time:\", end - start)\n\n if (ep + 1) % FLAGS.eval_freq == 0 or ep + 1 == FLAGS.num_epochs:\n # Eval on training data\n act_values_dict = {}\n for key, _ in losses_eval_train.items():\n act_values_dict[key] = 0\n n_iter_per_epoch = int(NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size)\n for i in range(n_iter_per_epoch):\n values = list(losses_eval_train.values())\n act_values = sess.run(values)\n for key, value in zip(act_values_dict.keys(), act_values):\n act_values_dict[key] += value\n summary = tf.Summary()\n current_global_step = sess.run(global_step)\n for key, value in act_values_dict.items():\n print(\"train-\" + key, value / n_iter_per_epoch)\n summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)\n if writer_train is not None:\n writer_train.add_summary(summary, current_global_step)\n\n # Eval on test data\n act_values_dict = {}\n for key, _ in losses_eval_test.items():\n act_values_dict[key] = 0\n n_iter_per_epoch = int(NUM_EVAL_EXAMPLES / FLAGS.eval_batch_size)\n for i in range(n_iter_per_epoch):\n values = list(losses_eval_test.values())\n act_values = sess.run(values)\n for key, value in zip(act_values_dict.keys(), act_values):\n act_values_dict[key] += value\n summary = 
tf.Summary()\n current_global_step = sess.run(global_step)\n for key, value in act_values_dict.items():\n print(\"test-\" + key, value / n_iter_per_epoch)\n summary.value.add(tag=key, simple_value=value / n_iter_per_epoch)\n if writer_test is not None:\n writer_test.add_summary(summary, current_global_step)\n\n saver.save(sess, sv.save_path, global_step=global_step)\n sv.stop()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] |
[
[
"tensorflow.compat.v1.app.flags.DEFINE_bool",
"tensorflow.compat.v1.train.Supervisor",
"numpy.random.randint",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.app.flags.DEFINE_string",
"tensorflow.compat.v1.app.flags.DEFINE_integer",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.app.flags.DEFINE_float",
"tensorflow.compat.v1.device",
"numpy.random.seed",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.Summary"
]
] |
ElijahAhianyo/advertools
|
[
"5d3d3c88bfb76fe2917f3583de8fe874e26a5900"
] |
[
"advertools/logs.py"
] |
[
"\"\"\"\n.. _logs:\n\nLog File Analysis (experimental)\n================================\n\nLogs contain very detailed information about events happening on computers.\nAnd the extra details that they provide, come with additional complexity that\nwe need to handle ourselves. A pageview may contain many log lines, and a\nsession can consist of several pageviews for example.\n\nAnother important characterisitic of log files is that their are usualy not\nbig.\nThey are massive.\n\nSo, we also need to cater for their large size, as well as rapid changes.\n\nTL;DR\n\n>>> import advertools as adv\n>>> import pandas as pd\n>>> adv.logs_to_df(log_file='access.log',\n... output_file='access_logs.parquet',\n... errors_file='log_errors.csv',\n... log_format='common',\n... fields=None)\n>>> logs_df = pd.read_parquet('access_logs.parquet')\n\nHow to run the :func:`logs_to_df` function:\n-------------------------------------------\n\n* ``log_file``: The path to the log file you are trying to analyze.\n* ``output_file``: The path to where you want the parsed and compressed file\n to be saved. Only the `parquet` format is supported.\n* ``errors_file``: You will almost certainly have log lines that don't conform\n to the format that you have, so all lines that weren't properly parsed would\n go to this file. This file also contains the error messages, so you know what\n went wrong, and how you might fix it. In some cases, you might simply take\n these \"errors\" and parse them again. They might not be really errors, but\n lines in a different format, or temporary debug messages.\n* ``log_format``: The format in which your logs were formatted. Logs can (and\n are) formatted in many ways, and there is no right or wrong way. However,\n there are defaults, and a few popular formats that most servers use. It is\n likely that your file is in one of the popular formats. This parameter can\n take any one of the pre-defined formats, for example \"common\", or \"extended\",\n or a regular expression that you provide. This means that you can parse any\n log format (as long as lines are single lines, and not formatted in JSON).\n* ``fields``: If you selected one of the supported formats, then there is no\n need to provide a value for this parameter. You have to provide a list of\n fields in case you provide a custom (regex) format. The fields will become\n the names of the columns of the resulting DataFrame, so you can distinguish\n between them (client, time, status code, response size, etc.)\n\nSupported Log Formats\n---------------------\n\n* `common`\n* `combined` (a.k.a \"extended\")\n* `common_with_vhost`\n* `nginx_error`\n* `apache_error`\n\n\n\nParse and Analyze Crawl Logs in a Dataframe\n===========================================\n\nWhile crawling with the :func:`crawl` function, the process produces logs for\nevery page crawled, scraped, redirected, and even blocked by robots.txt rules.\n\nBy default, those logs are can be seen on the command line as their default\ndestination is stdout.\n\nA good practice is to set a ``LOG_FILE`` so you can save those logs to a text\nfile, and review them later. There are several reasons why you might want to do\nthat:\n\n* Blocked URLs: The crawler obeys robots.txt rules by default, and when it\n encounters pages that it shouldn't crawl, it doesn't. 
However, this is logged\n as an event, and you can easily extract a list of blocked URLs from the logs.\n* Crawl errors: You might also get some errors while crawling, and it can be\n interesting to know which URLs generated errors.\n* Filtered pages: Those are pages that were discovered but weren't crawled\n because they are not a sub-domain of the provided url_list, or happen to be\n on external domains altogether.\n\nThis can simply be done by specifying a file name through the optional\n`custom_settings` parameter of ``crawl``:\n\n>>> import advertools as adv\n>>> adv.crawl('https://example.com',\n output_file='example.jl',\n follow_links=True,\n custom_settings={'LOG_FILE': 'example.log'})\n\nIf you run it this way, all logs will be saved to the file you chose,\n`example.log` in this case.\n\nNow, you can use the :func:`crawllogs_to_df` function to open the logs in a\nDataFrame:\n\n>>> import advertools as adv\n>>> logs_df = adv.crawllogs_to_df('example.log')\n\n\nThe DataFrame might contain the following columns:\n\n* `time`: The timestamp for the process\n* `middleware`: The middleware responsible for this process, whether it is the\n core engine, the scraper, error handler and so on.\n* `level`: The logging level (DEBUG, INFO, etc.)\n* `message`: A single word summarizing what this row represents, \"Crawled\",\n \"Scraped\", \"Filtered\", and so on.\n* `domain`: The domain name of filtered (not crawled pages) typically for URLs\n outside the current website.\n* `method`: The HTTP method used in this process (GET, PUT, etc.)\n* `url`: The URL currently under process.\n* `status`: HTTP status code, 200, 404, etc.\n* `referer`: The referring URL, where applicable.\n* `method_to`: In redirect rows the HTTP method used to crawl the URL going to.\n* `redirect_to`: The URL redirected to.\n* `method_from`: In redirect rows the HTTP method used to crawl the URL coming\n from.\n* `redirect_from`: The URL redirected from.\n* `blocked_urls`: The URLs that were not crawled due to robots.txt rules.\n\n\"\"\"\nimport os\nimport re\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nimport pandas as pd\n\nLOG_FORMATS = {\n 'common': r'^(?P<client>\\S+) \\S+ (?P<userid>\\S+) \\[(?P<datetime>[^\\]]+)\\] \"(?P<method>[A-Z]+) (?P<request>[^ \"]+)? HTTP/[0-9.]+\" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-)$',\n 'combined': r'^(?P<client>\\S+) \\S+ (?P<userid>\\S+) \\[(?P<datetime>[^\\]]+)\\] \"(?P<method>[A-Z]+) (?P<request>[^ \"]+)? HTTP/[0-9.]+\" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-) \"(?P<referrer>[^\"]*)\" \"(?P<useragent>[^\"]*)\"$',\n 'common_with_vhost': r'^(?P<vhost>\\S+) (?P<client>\\S+) \\S+ (?P<userid>\\S+) \\[(?P<datetime>[^\\]]+)\\] \"(?P<method>[A-Z]+) (?P<request>[^ \"]+)? HTTP/[0-9.]+\" (?P<status>[0-9]{3}) (?P<size>[0-9]+|-)$',\n 'nginx_error': r'^(?P<datetime>\\d{4}/\\d\\d/\\d\\d \\d\\d:\\d\\d:\\d\\d) \\[(?P<level>[^\\]]+)\\] (?P<pid>\\d+)#(?P<tid>\\d+): (?P<counter>\\*\\d+ | )?(?P<message>.*)',\n 'apache_error': r'^(?P<datetime>\\[[^\\]]+\\]) (?P<level>\\[[^\\]]+\\]) \\[pid (?P<pid>\\d+)\\] (?P<file>\\S+):(?P<status> \\S+| ):? 
\\[client (?P<client>\\S+)\\] (?P<message>.*)',\n}\n\nLOG_FIELDS = {\n 'common': ['client', 'userid', 'datetime', 'method', 'request', 'status',\n 'size'],\n 'combined': ['client', 'userid', 'datetime', 'method', 'request', 'status',\n 'size', 'referer', 'user_agent'],\n 'common_with_vhost': ['virtual_host', 'client', 'userid', 'datetime',\n 'method', 'request', 'status', 'size'],\n 'nginx_error': ['datetime', 'level', 'process_id', 'thread_id', 'counter',\n 'message'],\n 'apache_error': ['datetime', 'level', 'process_id', 'file', 'status',\n 'client', 'message'],\n}\n\n\ndef logs_to_df(log_file, output_file, errors_file, log_format, fields=None):\n \"\"\"Parse and compress any log file into a DataFrame format.\n\n Convert a log file to a `parquet` file in a DataFrame format, and save all\n errors (or lines not conformig to the chosen log format) into a separate\n ``errors_file`` text file. Any non-JSON log format is possible, provided\n you have the right regex for it. A few default ones are provided and can be\n used. Check out ``adv.LOG_FORMATS`` and ``adv.LOG_FIELDS`` for the\n available formats and fields.\n\n >>> import advertools as adv\n >>> import pandas as pd\n >>> adv.logs_to_df(log_file='access.log',\n ... output_file='access_logs.parquet',\n ... errors_file='log_errors.csv',\n ... log_format='common',\n ... fields=None)\n >>> logs_df = pd.read_parquet('access_logs.parquet')\n\n You can now analyze ``logs_df`` as a normal pandas DataFrame.\n\n :param str log_file: The path to the log file.\n :param str output_file: The path to the desired output file. Must have a\n \".parquet\" extension, and must not have the same\n path as an existing file. \n :param str errors_file: The path where the parsing errors are stored. Any\n text format works, CSV is recommended to easily\n open it with any CSV reader with the separator as \n \"@@\".\n :param str log_format: Either the name of one of the supported log formats,\n or a regex of your own format.\n :param str fields: A list of fields, which will become the names of columns\n in ``output_file``. Only required if you provide a\n custom (regex) ``log_format``.\n\n \"\"\"\n if not output_file.endswith('.parquet'):\n raise ValueError(\"Please provide an `output_file` with a `.parquet` \"\n \"extension.\")\n for file in [output_file, errors_file]:\n if os.path.exists(file):\n raise ValueError(f\"The file '{file}' already exists. 
\"\n \"Please rename it, delete it, or choose another \"\n \"file name/path.\")\n\n regex = LOG_FORMATS.get(log_format) or log_format\n columns = fields or LOG_FIELDS[log_format]\n with TemporaryDirectory() as tempdir:\n tempdir_name = Path(tempdir)\n with open(log_file) as source_file:\n linenumber = 0\n parsed_lines = []\n for line in source_file:\n linenumber += 1\n try:\n log_line = re.findall(regex, line)[0]\n parsed_lines.append(log_line)\n except Exception as e:\n with open(errors_file, 'at') as err:\n err_line = line[:-1] if line.endswith('\\n') else line\n print('@@'.join([str(linenumber), err_line, str(e)]),\n file=err)\n pass\n if linenumber % 250_000 == 0:\n print(f'Parsed {linenumber:>15,} lines.', end='\\r')\n df = pd.DataFrame(parsed_lines, columns=columns)\n df.to_parquet(tempdir_name / f'file_{linenumber}.parquet')\n parsed_lines.clear()\n else:\n print(f'Parsed {linenumber:>15,} lines.', end='\\r')\n df = pd.DataFrame(parsed_lines, columns=columns)\n df.to_parquet(tempdir_name / f'file_{linenumber}.parquet')\n final_df = pd.read_parquet(tempdir_name)\n try:\n final_df['status'] = final_df['status'].astype('category')\n final_df['method'] = final_df['method'].astype('category')\n final_df['size'] = pd.to_numeric(final_df['size'],\n downcast='signed')\n except KeyError:\n pass\n final_df.to_parquet(output_file)\n\n\ndef crawllogs_to_df(logs_file_path):\n \"\"\"Convert a crawl logs file to a DataFrame.\n\n An interesting option while using the ``crawl`` function, is to specify a\n destination file to save the logs of the crawl process itself. This contains\n additional information about each crawled, scraped, blocked, or redirected\n URL.\n\n What you would most likely use this for is to get a list of URLs blocked by\n robots.txt rules. 
These can be found und the column ``blocked_urls``.\n Crawling errors are also interesting, and can be found in rows where\n ``message`` is equal to \"error\".\n\n >>> import advertools as adv\n >>> adv.crawl('https://example.com',\n output_file='example.jl',\n follow_links=True,\n custom_settings={'LOG_FILE': 'example.log'})\n >>> logs_df = adv.crawl_logs_to_df('example.log')\n\n\n :param str logs_file_path: The path to the logs file.\n\n :returns DataFrame crawl_logs_df: A DataFrame summarizing the logs.\n \"\"\"\n time_middleware_level = \"(\\d{4}-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d) \\[(.*?)\\] ([A-Z]+): \"\n time_middleware_level_error = \"(\\d{4}-\\d\\d-\\d\\d \\d\\d:\\d\\d:\\d\\d) \\[(.*?)\\] (ERROR): \"\n\n filtered_regex = time_middleware_level + \"(Filtered) offsite request to '(.*?)': <([A-Z]+) (.*?)>\" \n filtered_cols = ['time', 'middleware', 'level', 'message', 'domain', 'method', 'url']\n\n crawled_regex = time_middleware_level + \"(Crawled) \\((\\d\\d\\d)\\) <([A-Z]+) (.*?)> \\(referer: (.*?)\\)\" \n crawled_cols = ['time', 'middleware', 'level', 'message', 'status', 'method', 'url', 'referer']\n\n scraped_regex = time_middleware_level + \"(Scraped) from <(\\d\\d\\d) (.*?)>\" \n scraped_cols = ['time', 'middleware', 'level', 'message', 'status', 'url']\n\n redirect_regex = time_middleware_level + \"(Redirect)ing \\((\\d\\d\\d)\\) to <([A-Z]+) (.*?)> from <([A-Z]+) (.*?)>\"\n redirect_cols = ['time', 'middleware', 'level', 'message', 'status', 'method_to', 'redirect_to', 'method_from', 'redirect_from']\n\n blocked_regex = time_middleware_level + \"(Forbidden) by robots\\.txt: <([A-Z]+) (.*?)>\"\n blocked_cols = ['time', 'middleware', 'level', 'message', 'method', 'blocked_urls']\n\n error_regex = time_middleware_level + \"Spider (error) processing <([A-Z]+) (.*?)> \\(referer: (.*?)\\)\"\n error_cols = ['time', 'middleware', 'level', 'message', 'method', 'url', 'referer']\n\n error_level_regex = time_middleware_level_error + '(.*)? (\\d\\d\\d) (http.*)'\n error_level_cols = ['time', 'middleware', 'level', 'message', 'status', 'url']\n\n filtered_lines = []\n crawled_lines = []\n scraped_lines = []\n redirect_lines = []\n blocked_lines = []\n error_lines = []\n error_lvl_lines = []\n\n with open(logs_file_path) as file:\n for line in file:\n if re.findall(filtered_regex, line):\n filtered_lines.append(re.findall(filtered_regex, line)[0])\n if re.findall(crawled_regex, line):\n crawled_lines.append(re.findall(crawled_regex, line)[0])\n if re.findall(scraped_regex, line):\n scraped_lines.append(re.findall(scraped_regex, line)[0])\n if re.findall(redirect_regex, line):\n redirect_lines.append(re.findall(redirect_regex, line)[0])\n if re.findall(blocked_regex, line):\n blocked_lines.append(re.findall(blocked_regex, line)[0])\n if re.findall(error_regex, line):\n error_lines.append(re.findall(error_regex, line)[0])\n if re.findall(error_level_regex, line):\n error_lvl_lines.append(re.findall(error_level_regex, line)[0])\n\n final_df = pd.concat([\n pd.DataFrame(filtered_lines, columns=filtered_cols),\n pd.DataFrame(crawled_lines, columns=crawled_cols),\n pd.DataFrame(scraped_lines, columns=scraped_cols),\n pd.DataFrame(redirect_lines, columns=redirect_cols),\n pd.DataFrame(blocked_lines, columns=blocked_cols),\n pd.DataFrame(error_lines, columns=error_cols),\n pd.DataFrame(error_lvl_lines, columns=error_level_cols),\n ])\n\n final_df['time'] = pd.to_datetime(final_df['time'])\n final_df = final_df.sort_values('time').reset_index(drop=True)\n\n return final_df\n"
] |
[
[
"pandas.read_parquet",
"pandas.to_datetime",
"pandas.to_numeric",
"pandas.DataFrame"
]
] |
lucasmansilla/multiatlas-landmark
|
[
"0295a4471ca8f08040885f105b4c67e8f450c57e"
] |
[
"scripts/get_labels_from_points.py"
] |
[
"import os\nimport argparse\nimport numpy as np\nfrom PIL import Image\n\nfrom src.utils.io import read_dir\nfrom src.preprocess import get_seg\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--points_dir', type=str)\n parser.add_argument('--output_dir', type=str)\n args = parser.parse_args()\n\n os.makedirs(args.output_dir, exist_ok=True)\n\n points_files = read_dir(args.points_dir)\n\n num_points = len(points_files)\n print('\\nGetting segmentations from points:\\n')\n for i, points_path in enumerate(points_files):\n points_name = os.path.basename(points_path)\n\n print(f'\\t{i+1:>3}/{num_points} File {points_name}', end=' ', flush=True)\n\n points = np.load(points_path)\n\n # Get label image from points\n label = get_seg(points)\n\n # Save label image\n output_path = os.path.join(args.output_dir, points_name.split('.')[0] + '.png')\n Image.fromarray(np.uint8(label)).save(output_path)\n\n print('Ok')\n\n print('\\nDone.\\n')\n"
] |
[
[
"numpy.load",
"numpy.uint8"
]
] |
Kleinjohann/elephant
|
[
"74ef4a27f1d208fa0aab94f33f315907401da3bb"
] |
[
"elephant/test/test_asset.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nUnit tests for the ASSET analysis.\n\n:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.\n:license: Modified BSD, see LICENSE.txt for details.\n\"\"\"\n\nimport unittest\nimport numpy as np\nimport scipy.spatial\nimport quantities as pq\nimport neo\n\ntry:\n import sklearn\nexcept ImportError:\n HAVE_SKLEARN = False\nelse:\n import elephant.asset as asset\n HAVE_SKLEARN = True\n stretchedmetric2d = asset._stretched_metric_2d\n cluster = asset.cluster_matrix_entries\n\n\[email protected](HAVE_SKLEARN, 'requires sklearn')\nclass AssetTestCase(unittest.TestCase):\n\n def test_stretched_metric_2d_size(self):\n nr_points = 4\n x = np.arange(nr_points)\n D = stretchedmetric2d(x, x, stretch=1, ref_angle=45)\n self.assertEqual(D.shape, (nr_points, nr_points))\n\n def test_stretched_metric_2d_correct_stretching(self):\n x = (0, 1, 0)\n y = (0, 0, 1)\n stretch = 10\n ref_angle = 0\n D = stretchedmetric2d(x, y, stretch=stretch, ref_angle=ref_angle)\n self.assertEqual(D[0, 1], 1)\n self.assertEqual(D[0, 2], stretch)\n\n def test_stretched_metric_2d_symmetric(self):\n x = (1, 2, 2)\n y = (1, 2, 0)\n stretch = 10\n D = stretchedmetric2d(x, y, stretch=stretch, ref_angle=45)\n np.testing.assert_array_almost_equal(D, D.T, decimal=12)\n\n def test_stretched_metric_2d_equals_euclidean_if_stretch_1(self):\n x = np.arange(10)\n y = y = x ** 2 - 2 * x - 4\n # compute stretched distance matrix\n stretch = 1\n D = stretchedmetric2d(x, y, stretch=stretch, ref_angle=45)\n # Compute Euclidean distance matrix\n points = np.vstack([x, y]).T\n E = scipy.spatial.distance_matrix(points, points)\n # assert D == E\n np.testing.assert_array_almost_equal(D, E, decimal=12)\n\n def test_cluster_correct(self):\n mat = np.zeros((6, 6))\n mat[[2, 4, 5], [0, 0, 1]] = 1\n mat_clustered = cluster(mat, eps=4, min=2, stretch=6)\n\n mat_correct = np.zeros((6, 6))\n mat_correct[[4, 5], [0, 1]] = 1\n mat_correct[2, 0] = -1\n np.testing.assert_array_equal(mat_clustered, mat_correct)\n\n def test_cluster_symmetric(self):\n x = [0, 1, 2, 5, 6, 7]\n y = [3, 4, 5, 1, 2, 3]\n mat = np.zeros((10, 10))\n mat[x, y] = 1\n mat = mat + mat.T\n # compute stretched distance matrix\n mat_clustered = cluster(mat, eps=4, min=2, stretch=6)\n mat_equals_m1 = (mat_clustered == -1)\n mat_equals_0 = (mat_clustered == 0)\n mat_larger_0 = (mat_clustered > 0)\n np.testing.assert_array_equal(mat_equals_m1, mat_equals_m1.T)\n np.testing.assert_array_equal(mat_equals_0, mat_equals_0.T)\n np.testing.assert_array_equal(mat_larger_0, mat_larger_0.T)\n\n def test_sse_difference(self):\n a = {(1, 2): set([1, 2, 3]), (3, 4): set([5, 6]), (6, 7): set([0, 1])}\n b = {(1, 2): set([1, 2, 5]), (5, 6): set([0, 2]), (6, 7): set([0, 1])}\n diff_ab_pixelwise = {(3, 4): set([5, 6])}\n diff_ba_pixelwise = {(5, 6): set([0, 2])}\n diff_ab_linkwise = {(1, 2): set([3]), (3, 4): set([5, 6])}\n diff_ba_linkwise = {(1, 2): set([5]), (5, 6): set([0, 2])}\n self.assertEqual(\n asset.sse_difference(a, b, 'pixelwise'), diff_ab_pixelwise)\n self.assertEqual(\n asset.sse_difference(b, a, 'pixelwise'), diff_ba_pixelwise)\n self.assertEqual(\n asset.sse_difference(a, b, 'linkwise'), diff_ab_linkwise)\n self.assertEqual(\n asset.sse_difference(b, a, 'linkwise'), diff_ba_linkwise)\n\n def test_sse_intersection(self):\n a = {(1, 2): set([1, 2, 3]), (3, 4): set([5, 6]), (6, 7): set([0, 1])}\n b = {(1, 2): set([1, 2, 5]), (5, 6): set([0, 2]), (6, 7): set([0, 1])}\n inters_ab_pixelwise = {(1, 2): set([1, 2, 3]), (6, 7): set([0, 1])}\n 
inters_ba_pixelwise = {(1, 2): set([1, 2, 5]), (6, 7): set([0, 1])}\n inters_ab_linkwise = {(1, 2): set([1, 2]), (6, 7): set([0, 1])}\n inters_ba_linkwise = {(1, 2): set([1, 2]), (6, 7): set([0, 1])}\n self.assertEqual(\n asset.sse_intersection(a, b, 'pixelwise'), inters_ab_pixelwise)\n self.assertEqual(\n asset.sse_intersection(b, a, 'pixelwise'), inters_ba_pixelwise)\n self.assertEqual(\n asset.sse_intersection(a, b, 'linkwise'), inters_ab_linkwise)\n self.assertEqual(\n asset.sse_intersection(b, a, 'linkwise'), inters_ba_linkwise)\n\n def test_sse_relations(self):\n a = {(1, 2): set([1, 2, 3]), (3, 4): set([5, 6]), (6, 7): set([0, 1])}\n b = {(1, 2): set([1, 2, 5]), (5, 6): set([0, 2]), (6, 7): set([0, 1])}\n c = {(5, 6): set([0, 2])}\n d = {(3, 4): set([0, 1]), (5, 6): set([0, 1, 2])}\n self.assertTrue(asset.sse_isequal({}, {}))\n self.assertTrue(asset.sse_isequal(a, a))\n self.assertFalse(asset.sse_isequal(b, c))\n self.assertTrue(asset.sse_isdisjoint(a, c))\n self.assertTrue(asset.sse_isdisjoint(a, d))\n self.assertFalse(asset.sse_isdisjoint(a, b))\n self.assertTrue(asset.sse_issub(c, b))\n self.assertTrue(asset.sse_issub(c, d))\n self.assertFalse(asset.sse_issub(a, b))\n self.assertTrue(asset.sse_issuper(b, c))\n self.assertTrue(asset.sse_issuper(d, c))\n self.assertFalse(asset.sse_issuper(a, b))\n self.assertTrue(asset.sse_overlap(a, b))\n self.assertFalse(asset.sse_overlap(c, d))\n\n def test_mask_matrix(self):\n mat1 = np.array([[0, 1], [1, 2]])\n mat2 = np.array([[2, 1], [1, 3]])\n mask_1_2 = asset.mask_matrices([mat1, mat2], [1, 2])\n mask_1_2_correct = np.array([[False, False], [False, True]])\n self.assertTrue(np.all(mask_1_2 == mask_1_2_correct))\n self.assertIsInstance(mask_1_2[0, 0], np.bool_)\n\n def test_cluster_matrix_entries(self):\n mat = np.array([[False, False, True, False],\n [False, True, False, False],\n [True, False, False, True],\n [False, False, True, False]])\n clustered1 = asset.cluster_matrix_entries(\n mat, eps=1.5, min=2, stretch=1)\n clustered2 = asset.cluster_matrix_entries(\n mat, eps=1.5, min=3, stretch=1)\n clustered1_correctA = np.array([[0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 2],\n [0, 0, 2, 0]])\n clustered1_correctB = np.array([[0, 0, 2, 0],\n [0, 2, 0, 0],\n [2, 0, 0, 1],\n [0, 0, 1, 0]])\n clustered2_correct = np.array([[0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, -1],\n [0, 0, -1, 0]])\n self.assertTrue(np.all(clustered1 == clustered1_correctA) or\n np.all(clustered1 == clustered1_correctB))\n self.assertTrue(np.all(clustered2 == clustered2_correct))\n\n def test_intersection_matrix(self):\n st1 = neo.SpikeTrain([1, 2, 4]*pq.ms, t_stop=6*pq.ms)\n st2 = neo.SpikeTrain([1, 3, 4]*pq.ms, t_stop=6*pq.ms)\n st3 = neo.SpikeTrain([2, 5]*pq.ms, t_start=1*pq.ms, t_stop=6*pq.ms)\n st4 = neo.SpikeTrain([1, 3, 6]*pq.ms, t_stop=8*pq.ms)\n binsize = 1 * pq.ms\n\n # Check that the routine works for correct input...\n # ...same t_start, t_stop on both time axes\n imat_1_2, xedges, yedges = asset.intersection_matrix(\n [st1, st2], binsize, dt=5*pq.ms)\n trueimat_1_2 = np.array([[0., 0., 0., 0., 0.],\n [0., 2., 1., 1., 2.],\n [0., 1., 1., 0., 1.],\n [0., 1., 0., 1., 1.],\n [0., 2., 1., 1., 2.]])\n self.assertTrue(np.all(xedges == np.arange(6)*pq.ms)) # correct bins\n self.assertTrue(np.all(yedges == np.arange(6)*pq.ms)) # correct bins\n self.assertTrue(np.all(imat_1_2 == trueimat_1_2)) # correct matrix\n # ...different t_start, t_stop on the two time axes\n imat_1_2, xedges, yedges = asset.intersection_matrix(\n [st1, st2], binsize, t_start_y=1*pq.ms, 
dt=5*pq.ms)\n trueimat_1_2 = np.array([[0., 0., 0., 0., 0.],\n [2., 1., 1., 2., 0.],\n [1., 1., 0., 1., 0.],\n [1., 0., 1., 1., 0.],\n [2., 1., 1., 2., 0.]])\n self.assertTrue(np.all(xedges == np.arange(6)*pq.ms)) # correct bins\n self.assertTrue(np.all(imat_1_2 == trueimat_1_2)) # correct matrix\n\n # Check that errors are raised correctly...\n # ...for dt too large compared to length of spike trains\n self.assertRaises(ValueError, asset.intersection_matrix,\n spiketrains=[st1, st2], binsize=binsize, dt=8*pq.ms)\n # ...for different SpikeTrain's t_starts\n self.assertRaises(ValueError, asset.intersection_matrix,\n spiketrains=[st1, st3], binsize=binsize, dt=8*pq.ms)\n # ...when the analysis is specified for a time span where the\n # spike trains are not defined (e.g. t_start_x < SpikeTrain.t_start)\n self.assertRaises(ValueError, asset.intersection_matrix,\n spiketrains=[st1, st2], binsize=binsize, dt=8*pq.ms,\n t_start_x=-2*pq.ms, t_start_y=-2*pq.ms)\n\n\ndef suite():\n suite = unittest.makeSuite(AssetTestCase, 'test')\n return suite\n\n\ndef run():\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.arange",
"numpy.testing.assert_array_equal",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"numpy.testing.assert_array_almost_equal"
]
] |
aoifemcdonagh/yolov4-custom-functions
|
[
"0a4ec58597a5bb3429f7d09ea157643c7a4e66f9"
] |
[
"detect_video.py"
] |
[
"import os\r\n# comment out below line to enable tensorflow outputs\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\nimport time\r\nimport tensorflow as tf\r\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\r\nif len(physical_devices) > 0:\r\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\r\nfrom absl import app, flags, logging\r\nfrom absl.flags import FLAGS\r\nimport core.utils as utils\r\nfrom core.yolov4 import filter_boxes\r\nfrom core.functions import *\r\nfrom tensorflow.python.saved_model import tag_constants\r\nfrom PIL import Image\r\nimport cv2\r\nimport numpy as np\r\nfrom tensorflow.compat.v1 import ConfigProto\r\nfrom tensorflow.compat.v1 import InteractiveSession\r\n\r\nflags.DEFINE_string('framework', 'tf', '(tf, tflite, trt')\r\nflags.DEFINE_string('weights', './checkpoints/yolov4-416',\r\n 'path to weights file')\r\nflags.DEFINE_integer('size', 416, 'resize images to')\r\nflags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')\r\nflags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')\r\nflags.DEFINE_string('video', './data/video/video.mp4', 'path to input video or set to 0 for webcam')\r\nflags.DEFINE_string('output', None, 'path to output video')\r\nflags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')\r\nflags.DEFINE_float('iou', 0.45, 'iou threshold')\r\nflags.DEFINE_float('score', 0.50, 'score threshold')\r\nflags.DEFINE_boolean('count', False, 'count objects within video')\r\nflags.DEFINE_boolean('dont_show', False, 'dont show video output')\r\nflags.DEFINE_boolean('info', False, 'print info on detections')\r\nflags.DEFINE_boolean('crop', False, 'crop detections from images')\r\nflags.DEFINE_boolean('plate', False, 'perform license plate recognition')\r\n\r\ndef main(_argv):\r\n config = ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n session = InteractiveSession(config=config)\r\n STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)\r\n input_size = FLAGS.size\r\n video_path = FLAGS.video\r\n # get video name by using split method\r\n video_name = video_path.split('/')[-1]\r\n video_name = video_name.split('.')[0]\r\n if FLAGS.framework == 'tflite':\r\n interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)\r\n interpreter.allocate_tensors()\r\n input_details = interpreter.get_input_details()\r\n output_details = interpreter.get_output_details()\r\n print(input_details)\r\n print(output_details)\r\n else:\r\n saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])\r\n infer = saved_model_loaded.signatures['serving_default']\r\n\r\n # begin video capture\r\n try:\r\n vid = cv2.VideoCapture(int(video_path))\r\n except:\r\n vid = cv2.VideoCapture(video_path)\r\n\r\n out = None\r\n\r\n if FLAGS.output:\r\n # by default VideoCapture returns float instead of int\r\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n fps = int(vid.get(cv2.CAP_PROP_FPS))\r\n codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)\r\n out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))\r\n\r\n frame_num = 0\r\n while True:\r\n return_value, frame = vid.read()\r\n if return_value:\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n frame_num += 1\r\n image = Image.fromarray(frame)\r\n else:\r\n print('Video has ended or failed, try a different video format!')\r\n break\r\n \r\n frame_size = frame.shape[:2]\r\n image_data = cv2.resize(frame, (input_size, input_size))\r\n 
image_data = image_data / 255.\r\n image_data = image_data[np.newaxis, ...].astype(np.float32)\r\n start_time = time.time()\r\n\r\n if FLAGS.framework == 'tflite':\r\n interpreter.set_tensor(input_details[0]['index'], image_data)\r\n interpreter.invoke()\r\n pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]\r\n if FLAGS.model == 'yolov3' and FLAGS.tiny == True:\r\n boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,\r\n input_shape=tf.constant([input_size, input_size]))\r\n else:\r\n boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,\r\n input_shape=tf.constant([input_size, input_size]))\r\n else:\r\n batch_data = tf.constant(image_data)\r\n pred_bbox = infer(batch_data)\r\n for key, value in pred_bbox.items():\r\n boxes = value[:, :, 0:4]\r\n pred_conf = value[:, :, 4:]\r\n\r\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\r\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\r\n scores=tf.reshape(\r\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\r\n max_output_size_per_class=50,\r\n max_total_size=50,\r\n iou_threshold=FLAGS.iou,\r\n score_threshold=FLAGS.score\r\n )\r\n\r\n # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax\r\n original_h, original_w, _ = frame.shape\r\n bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)\r\n\r\n pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0], valid_detections.numpy()[0]]\r\n\r\n # read in all class names from config\r\n class_names = utils.read_class_names(cfg.YOLO.CLASSES)\r\n\r\n # by default allow all classes in .names file\r\n #allowed_classes = list(class_names.values())\r\n \r\n # custom allowed classes (uncomment line below to allow detections for only people)\r\n allowed_classes = ['person']\r\n \r\n # if crop flag is enabled, crop each detection and save it as new image\r\n if FLAGS.crop:\r\n crop_rate = 150 # capture images every so many frames (ex. 
crop photos every 150 frames)\r\n crop_path = os.path.join(os.getcwd(), 'detections', 'crop', video_name)\r\n try:\r\n os.mkdir(crop_path)\r\n except FileExistsError:\r\n pass\r\n if frame_num % crop_rate == 0:\r\n final_path = os.path.join(crop_path, 'frame_' + str(frame_num))\r\n try:\r\n os.mkdir(final_path)\r\n except FileExistsError:\r\n pass \r\n crop_objects(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), pred_bbox, final_path, allowed_classes)\r\n else:\r\n pass\r\n\r\n if FLAGS.count:\r\n # count objects found\r\n counted_classes = count_objects(pred_bbox, by_class = True, allowed_classes=allowed_classes)\r\n # loop through dict and print\r\n for key, value in counted_classes.items():\r\n print(\"Number of {}s: {}\".format(key, value))\r\n image = utils.draw_bbox(frame, pred_bbox, FLAGS.info, counted_classes, allowed_classes=allowed_classes, read_plate=FLAGS.plate)\r\n else:\r\n image = utils.draw_bbox(frame, pred_bbox, FLAGS.info, allowed_classes=allowed_classes, read_plate=FLAGS.plate)\r\n \r\n fps = 1.0 / (time.time() - start_time)\r\n print(\"FPS: %.2f\" % fps)\r\n result = np.asarray(image)\r\n #cv2.namedWindow(\"result\", cv2.WINDOW_AUTOSIZE)\r\n result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n \r\n if not FLAGS.dont_show:\r\n cv2.imshow(\"result\", result)\r\n \r\n if FLAGS.output:\r\n out.write(result)\r\n if cv2.waitKey(1) & 0xFF == ord('q'): break\r\n #cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n try:\r\n app.run(main)\r\n except SystemExit:\r\n pass\r\n"
] |
[
[
"tensorflow.compat.v1.ConfigProto",
"tensorflow.constant",
"tensorflow.saved_model.load",
"tensorflow.config.experimental.set_memory_growth",
"numpy.asarray",
"tensorflow.lite.Interpreter",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.shape",
"tensorflow.compat.v1.InteractiveSession"
]
] |
mananatee/PyNeuron-Toolbox
|
[
"43f5f4201ac9b17126c7a29e21b7afa17f4cab08"
] |
[
"PyNeuronToolbox/synapses.py"
] |
[
"from __future__ import division\nimport numpy as np\n\ndef add_exp2(h,seg,spktimes,e=0,tau1=0.5,tau2=20,weight=0.001):\n \"\"\"\n Adds a double-exponential synapse at seg, with spikes\n specified by the list/np.array spktimes. Optional arguments\n specify parameters for the synapse. Returns a list of\n hocObjects that must be kept in memory (see below).\n\n IMPORTANT: This function requires that you have the vecevent.mod\n file compiled and loaded into python. This file can\n be found in nrn/share/examples/nrniv/netcon/\n\n IMPORTANT: Neuron requires that you keep the Exp2Syn, VecStim,\n NetCon, and Vector objects in memory during the\n simulation. You will also need these objects if\n you wish to alter the weight of the synapse\n or other parameters during the simulation.\n \"\"\"\n syn = h.Exp2Syn(seg)\n syn.e = 0\n syn.tau1 = 0.5\n syn.tau2 = 20\n vs = h.VecStim()\n vec = h.Vector(np.sort(spktimes)) # Spend a bit of overhead to make sure spktimes are sorted\n vs.play(vec)\n nc = h.NetCon(vs,syn)\n nc.weight[0] = weight\n return [syn,vs,nc,vec] # All these things need to be kept in memory\n"
] |
[
[
"numpy.sort"
]
] |
clive819/YOLO
|
[
"2d3afef299b888a60f24df027506a0f16cb1344b"
] |
[
"train.py"
] |
[
"from torch.utils.data.dataloader import DataLoader\nfrom torch.optim import SGD, Adam\nfrom config import device, tc\nfrom model import YOLO\nfrom utils import *\nimport torch\nimport numpy as np\n\n\n# MARK: - load data\ncocoDataset = COCODataset(tc.imageDir, tc.annFile, fromInternet=False if tc.imageDir else True)\ndataLoader = DataLoader(cocoDataset, batch_size=tc.batchSize, shuffle=True)\n\n\n# MARK: - train\nmodel = YOLO().to(device)\nif tc.preTrainedWeight:\n model.load_state_dict(torch.load(tc.preTrainedWeight, map_location=device))\n model.warmUpBatch = tc.warmUpBatches\n\noptimizer = SGD(model.parameters(), lr=1e-3)\nprevBestLoss = np.inf\nbatches = len(dataLoader)\nlogger = MetricsLogger()\n\n\nmodel.train()\nfor epoch in range(tc.epochs):\n losses = []\n for batch, (x, y, z) in enumerate(dataLoader):\n x, y, z = x.to(device), y.to(device), z.to(device)\n\n loss = model(x, y, z)\n losses.append(loss.cpu().item())\n\n metrics = model.metrics\n logger.step(metrics, epoch, batch)\n logger.step({'Loss': losses[-1]}, epoch, batch)\n log = 'Epoch {} | {} / {}'.format(epoch, batch, batches)\n for key in metrics:\n log += ' | {}: {:.4f}'.format(key, metrics[key])\n log += ' | loss: {:.4f}\\r'.format(losses[-1])\n print(log, end='')\n\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\n optimizer.step()\n\n logger.epochEnd(epoch)\n avgLoss = np.mean(losses)\n print('\\nEpoch {}, loss: {:.8f}'.format(epoch, avgLoss))\n\n if avgLoss < prevBestLoss:\n print('[+] Loss improved from {:.8f} to {:.8f}, saving model...'.format(prevBestLoss, avgLoss))\n torch.save(model.state_dict(), 'model.pt')\n prevBestLoss = avgLoss\n logger.addScalar('Model', avgLoss, epoch)\n logger.flush()\nlogger.close()\n"
] |
[
[
"torch.utils.data.dataloader.DataLoader",
"numpy.mean",
"torch.load"
]
] |
GreenWaves-Technologies/gap_sdk
|
[
"6d255c70883cf157d76d006b2dbf55bc6974b21f"
] |
[
"tools/nntool/importer/tflite2/handlers/backend/conv_2d.py"
] |
[
"# Copyright (C) 2020 GreenWaves Technologies, SAS\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nimport numpy as np\nfrom graph.dim import Conv2DFilterDim, DilationDim, Dim, StrideDim\nfrom graph.types import ConstantInputParameters, Conv2DParameters, NNEdge\nfrom importer.common.provisional_dim import ProvisionalDim\nfrom importer.tflite2.common import LOG\nfrom importer.tflite2.common.tflite_node import TFLiteNode\nfrom importer.tflite2.tflite_schema_head.Conv2DOptions import Conv2DOptions\n\nfrom ..backend_handler import BackendHandler\nfrom ..handler import tflite_op\nfrom .filter_mixin import FilterMixin\n\n\n@tflite_op(\"CONV_2D\")\nclass Conv2D(FilterMixin, BackendHandler):\n\n @classmethod\n def version_1(cls, node: TFLiteNode, **kwargs):\n node_opts = node.get_options(Conv2DOptions)\n G = kwargs['G']\n opts = kwargs['opts']\n all_nodes = kwargs['all_nodes']\n\n inputs = [all_nodes[t] for t in node.input]\n\n x = inputs[0]\n x = cls.remove_known_batch_dimension(G, x, node)\n x_shape = x[2].shape\n in_b, h, w, in_c = tuple(x_shape)\n\n filt = inputs[1]\n weights_node = filt[0]\n filt_shape = filt[2].shape\n # ['in_c', 'h', 'w', 'out_c']\n filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)\n\n # get filter dimensions\n if filt_h > h or filt_w > w:\n LOG.warning(\"Filter %s of shape [%dx%d] is bigger than input of shape [%dx%d]\",\n node.name, filt_h, filt_w, h, w)\n\n filt_dim = Conv2DFilterDim(filt_h, filt_w,\n filt_out_c, in_c=filt_in_c)\n filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)\n\n # compute padding\n pad = cls.get_tf_padding(node_opts.Padding())\n\n # does it have biases\n if len(inputs) > 2:\n bias = inputs[2]\n bias_node = bias[0]\n else:\n bias_node = ConstantInputParameters(f'{node.name}_bias',\n dims=Dim.unnamed([filt_out_c]),\n value=np.zeros([filt_out_c], dtype=np.float32)) # TODO - check\n groups = in_c // filt_in_c\n params = Conv2DParameters(node.name,\n filt=filt_dim,\n stride=StrideDim(\n node_opts.StrideH(), node_opts.StrideW()),\n dilation=DilationDim(node_opts.DilationHFactor(),\n node_opts.DilationWFactor()),\n groups=groups,\n padding=pad,\n has_bias=True,\n in_dims_hint=[['h', 'w', 'c'], cls.TF_LITE_FILTER_ORDER.copy(), [\n 'out_c']],\n out_dims_hint=[['h', 'w', 'c']],\n constant_store=G.constant_store)\n G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))\n G.add_edge(NNEdge(from_node=bias_node, to_node=params, to_idx=2))\n cls.new_load_filter_parameters(\n G, params, params.filter.actual_shape, params.filter.get_order_idx(\n 'out_c'),\n node.input[0], weights_node, bias_node, node.output[0], opts)\n\n in_dim = Dim.named_ordered(h=h, w=w, c=in_c)\n out_dims = params.get_output_size(\n [in_dim, Dim.unnamed(filt_dim.shape), Dim.unnamed([filt_out_c])])\n pout_dims = ProvisionalDim([None] + out_dims[0].shape)\n G.add_edge(\n NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))\n oparams = 
cls.fuse_activation(node_opts, node.name, params, **kwargs)\n all_nodes[node.output[0]] = (oparams, 0, pout_dims)\n return oparams\n"
] |
[
[
"numpy.zeros"
]
] |
G-Wang/wavenet
|
[
"a0e39e2c128dec1ad2d534e4b7a436d67a119f9a"
] |
[
"pytorch/gary_sampler.py"
] |
[
"import argparse\nimport json\nimport os\nimport random\nimport torch\nimport torch.utils.data\nimport sys\nimport audio as deepaudio\nfrom hparams import hparams\n\nimport utils\n\nclass DeepMels(torch.utils.data.Dataset):\n \"\"\"\n This is the main class that calculates the spectrogram and returns the\n spectrogram, audio pair.\n\n This uses r9r9's deepvoice preprocessing to create mel spectrogram.\n \"\"\"\n def __init__(self, training_files, segment_length, mu_quantization,\n filter_length, hop_length, win_length, sampling_rate):\n audio_files = utils.files_to_list(training_files)\n self.audio_files = audio_files\n random.seed(1234)\n random.shuffle(self.audio_files)\n \n self.segment_length = segment_length\n self.mu_quantization = mu_quantization\n self.sampling_rate = sampling_rate\n \n def __getitem__(self, index):\n # Read audio\n filename = self.audio_files[index]\n wav = deepaudio.load_wav(filename)\n # load in raw_audio via utils\n raw_audio, _ = utils.load_wav_to_torch(filename)\n # convert wav to numpy\n audio = torch.from_numpy(wav)\n # take segment\n if audio.size(0) >= self.segment_length:\n max_audio_start = audio.size(0) - self.segment_length\n audio_start = random.randint(0, max_audio_start)\n audio = audio[audio_start:audio_start+self.segment_length]\n # update raw audio as well\n raw_audio = raw_audio[audio_start:audio_start+self.segment_length]\n else:\n audio = torch.nn.functional.pad(audio, (0, self.segment_length - audio.size(0)), 'constant').data\n # pad raw audio as well\n raw_audio = torch.nn.functional.pad(raw_audio, (0, self.segment_length - raw_audio.size(0)), 'constant').data\n # compute mel\n mel = deepaudio.melspectrogram(audio.numpy())\n # convert mel to torch\n mel = torch.from_numpy(mel)\n audio = utils.mu_law_encode(raw_audio / utils.MAX_WAV_VALUE, self.mu_quantization)\n return (mel, audio)\n \n def __len__(self):\n return len(self.audio_files)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Turns audio files into mel-spectrogram representations for inference\n\n Uses the data portion of the config for audio processing parameters, \n but ignores training files and segment lengths.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', \"--audio_list\", required=True, type=str,\n help='File containing list of wavefiles')\n parser.add_argument('-o', \"--output_dir\", required=True, type=str,\n help='Directory to put Mel-Spectrogram Tensors')\n parser.add_argument('-c', '--config', type=str,\n help='JSON file for configuration')\n \n args = parser.parse_args()\n\n filepaths = utils.files_to_list(args.audio_list)\n \n # Make directory if it doesn't exist\n if not os.path.isdir(args.output_dir):\n os.makedirs(args.output_dir)\n os.chmod(args.output_dir, 0o775)\n \n # Parse config. Only using data processing\n with open(args.config) as f:\n data = f.read()\n config = json.loads(data)\n data_config = config[\"data_config\"]\n mel_factory = Mel2SampOnehot(**data_config) \n \n for filepath in filepaths:\n audio, sampling_rate = utils.load_wav_to_torch(filepath)\n assert(sampling_rate == mel_factory.sampling_rate)\n melspectrogram = mel_factory.get_mel(audio)\n filename = os.path.basename(filepath)\n new_filepath = args.output_dir + '/' + filename + '.pt'\n print(new_filepath)\n torch.save(melspectrogram, new_filepath)"
] |
[
[
"torch.from_numpy",
"torch.save"
]
] |
potus28/ML-Project---GLN
|
[
"84e81b28fb8ee28ed3695f0148d6f278e82f78c3"
] |
[
"gln/test/model_inference.py"
] |
[
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\n\nimport rdkit\nfrom rdkit import Chem\nimport os\nimport numpy as np\nimport torch\nimport pickle as cp\nimport math\nfrom scipy.special import softmax\nfrom gln.data_process.data_info import DataInfo, load_bin_feats\nfrom gln.mods.mol_gnn.mol_utils import SmartsMols, SmilesMols\nfrom gln.common.reactor import Reactor\nfrom gln.graph_logic.logic_net import GraphPath\n\n\nclass RetroGLN(object):\n def __init__(self, dropbox, model_dump):\n \"\"\"\n Args:\n dropbox: the dropbox folder\n model_dump: the ckpt folder, which contains model dump and model args\n \"\"\"\n assert os.path.isdir(model_dump)\n\n arg_file = os.path.join(model_dump, 'args.pkl')\n with open(arg_file, 'rb') as f:\n self.args = cp.load(f)\n self.args.dropbox = dropbox\n\n DataInfo.init(dropbox, self.args)\n load_bin_feats(dropbox, self.args)\n\n model_file = os.path.join(model_dump, 'model.dump')\n self.gln = GraphPath(self.args)\n self.gln.load_state_dict(torch.load(model_file))\n self.gln.cuda()\n self.gln.eval()\n\n self.prod_center_maps = {}\n self.cached_smarts = None\n\n def _ordered_tpls(self, cano_prod, beam_size, rxn_type):\n if (rxn_type, cano_prod) not in self.prod_center_maps:\n mol = Chem.MolFromSmiles(cano_prod)\n if mol is None:\n return None\n if self.cached_smarts is None:\n self.cached_smarts = []\n print('caching smarts centers')\n for sm in DataInfo.prod_cano_smarts:\n self.cached_smarts.append(Chem.MolFromSmarts(sm))\n\n prod_center_cand_idx = []\n for i, sm in enumerate(self.cached_smarts):\n if sm is not None and mol.HasSubstructMatch(sm):\n prod_center_cand_idx.append(i)\n self.prod_center_maps[(rxn_type, cano_prod)] = prod_center_cand_idx\n prod_center_cand_idx = self.prod_center_maps[(rxn_type, cano_prod)]\n\n # infer the reaction center\n if not len(prod_center_cand_idx):\n return None\n prod_center_mols = [SmartsMols.get_mol_graph(DataInfo.prod_cano_smarts[m]) for m in prod_center_cand_idx]\n prod_mol = SmilesMols.get_mol_graph(cano_prod)\n prod_center_scores = self.gln.prod_center_predicate.inference([prod_mol], [prod_center_mols])\n prod_center_scores = prod_center_scores.view(-1).data.cpu().numpy()\n top_centers = np.argsort(-1 * prod_center_scores)[:beam_size]\n top_center_scores = [prod_center_scores[i] for i in top_centers]\n top_center_mols = [prod_center_mols[i] for i in top_centers]\n top_center_smarts = [DataInfo.prod_cano_smarts[prod_center_cand_idx[i]] for i in top_centers]\n\n # infer the template\n list_of_list_tpls = []\n for i, c in enumerate(top_center_smarts):\n assert c in DataInfo.unique_tpl_of_prod_center\n if not rxn_type in DataInfo.unique_tpl_of_prod_center[c]:\n continue\n tpl_indices = DataInfo.unique_tpl_of_prod_center[c][rxn_type]\n tpls = [DataInfo.unique_templates[t][1] for t in tpl_indices]\n list_of_list_tpls.append(tpls)\n if not len(list_of_list_tpls):\n return None\n tpl_scores = self.gln.tpl_fwd_predicate.inference([prod_mol] * len(top_center_mols), list_of_list_tpls)\n tpl_scores = tpl_scores.view(-1).data.cpu().numpy()\n\n idx = 0\n tpl_with_scores = []\n for i, c in enumerate(top_center_scores):\n for tpl in list_of_list_tpls[i]:\n t_score = tpl_scores[idx]\n tot_score = c + t_score\n tpl_with_scores.append((tot_score, tpl))\n idx += 1\n tpl_with_scores = sorted(tpl_with_scores, key=lambda x: -1 * x[0])\n\n return tpl_with_scores\n\n def run(self, raw_prod, beam_size, topk, rxn_type='UNK'):\n \"\"\"\n Args:\n raw_prod: the single product 
smiles\n beam_size: the size for beam search\n topk: top-k prediction of reactants\n rxn_type: (optional) reaction type\n Return:\n a dictionary with the following keys:\n {\n 'reactants': the top-k prediction of reactants\n 'template': the list of corresponding reaction templates used\n 'scores': the scores for the corresponding predictions, in descending order\n }\n if no valid reactions are found, None will be returned\n \"\"\"\n cano_prod = DataInfo.get_cano_smiles(raw_prod)\n prod_mol = SmilesMols.get_mol_graph(cano_prod)\n tpl_with_scores = self._ordered_tpls(cano_prod, beam_size, rxn_type)\n\n if tpl_with_scores is None:\n return None\n # filter out invalid tpls\n list_of_list_reacts = []\n list_reacts = []\n list_tpls = []\n num_tpls = 0\n num_reacts = 0\n for prod_tpl_score, tpl in tpl_with_scores:\n pred_mols = Reactor.run_reaction(raw_prod, tpl)\n if pred_mols is not None and len(pred_mols):\n num_tpls += 1\n list_of_list_reacts.append(pred_mols)\n num_reacts += len(pred_mols)\n list_tpls.append((prod_tpl_score, tpl))\n if num_tpls >= beam_size:\n break\n\n list_rxns = []\n for i in range(len(list_of_list_reacts)):\n list_rxns.append([DataInfo.get_cano_smiles(r) + '>>' + cano_prod for r in list_of_list_reacts[i]])\n if len(list_rxns) and len(list_tpls):\n react_scores = self.gln.reaction_predicate.inference([prod_mol] * len(list_tpls), list_rxns)\n react_scores = react_scores.view(-1).data.cpu().numpy()\n\n idx = 0\n final_joint = []\n for i, (prod_tpl_score, tpl) in enumerate(list_tpls):\n for reacts in list_of_list_reacts[i]:\n r_score = react_scores[idx]\n tot_score = prod_tpl_score + r_score\n final_joint.append((tot_score, tpl, reacts))\n idx += 1\n final_joint = sorted(final_joint, key=lambda x: -1 * x[0])[:topk]\n scores = [t[0] for t in final_joint]\n scores = softmax([each_score for each_score in scores])\n list_reacts = [t[2] for t in final_joint]\n ret_tpls = [t[1] for t in final_joint]\n result = {'template': ret_tpls,\n 'reactants': list_reacts,\n 'scores': scores}\n else:\n result = {'template': [],\n 'reactants': [],\n 'scores': []}\n return result\n"
] |
[
[
"numpy.argsort",
"scipy.special.softmax",
"torch.load"
]
] |
Subaru-PFS/dev_pfsmodel
|
[
"d01cf03a4c4eaa01ba5a9590ccf17744a33bdb05"
] |
[
"2d_PSF_code/Zernike_Module.py"
] |
[
"\"\"\"\nFirst created on Mon Aug 13 10:01:03 2018\n\nMain code for the creation of the image for Zernike analysis;\nOther moduls avaliable are:\n Zernike_Cutting_Module\n Zernike_Analysis_Module\n\n\nVersions:\nOct 31, 2018; 0.1 -> 0.11 fixed FRD effect\nNov 1, 2018; 0.11 -> 0.12 added correct edges to the detector; fixed wrong behavior for misaligment\nNov 2, 2018; 0.12 -> 0.13 added lorentzian wings to the illumination of the pupil\nNov 3, 2018; 0.13 -> 0.13b fixed edges of detector when det_vert is not 1\nNov 12, 2018; 0.13b -> 0.13c changed parameter describing hexagonal effect \"f\" from 0.1 to 0.2\nNov 12, 2018; 0.13c -> 0.14 changed illumination description modifying entrance -> exit pupil illumination\nNov 29, 2018; 0.14 -> 0.14b added fixed scattering slope, deduced from large image in focus\nDec 16, 2018; 0.14b -> 0.14c allparameters_proposal_err from list to array\nDec 18, 2018; 0.14c -> 0.14d strutFrac upper limit to 0.13 in create_parInit\nDec 23, 2018; 0.14d -> 0.15 refactoring so that x_ilum and y_ilum is one\nDec 26, 2018; 0.15 -> 0.15b when in focus, create exactly 10x oversampling\nDec 31, 2018; 0.15b -> 0.16 major rewrite of downsampling algorithm\nJan 8, 2019; 0.16 -> 0.17 added support for zmax=22\nJan 14, 2019; 0.17 -> 0.18 fixed bug with dowsampling algorithm - I was just taking central values\nJan 15, 2019; 0.18 -> 0.19 added simple algorithm to interpolate between 1/10 pixels in the best position\nFeb 15, 2019; 0.19 -> 0.20 updated analysis for the new data\nFeb 21, 2019; 0.20 -> 0.20b test parameter for showing globalparamers outside their limits\nFeb 22, 2019; 0.20 -> 0.21 added support for Zernike higher than 22\nFeb 22, 2019; 0.21 -> 0.21b added support for return image along side likelihood\nApr 17, 2019; 0.21b -> 0.21c changed defintion of residuals from (model-data) to (data-model)\nJun 4, 2019; 0.21c -> 0.21d slight cleaning of the code, no functional changes\nJun 26, 2019; 0.21d -> 0.21e included variable ``dataset'',\n which denots which data we are using in the analysis\nJul 29, 2019; 0.21e -> 0.21f changed the spread of paramters when drawing initial solutions, based on data\nSep 11, 2019; 0.21f -> 0.21g globalparameters_flat_6<1 to globalparameters_flat_6<=1\nOct 10, 2019: 0.21g -> 0.21h scattered_light_kernel saving option\nOct 31, 2019: 0.21h -> 0.22 (re)introduced small amount of apodization (PIPE2D-463)\nOct 31, 2019: 0.22 -> 0.22b introduced verbosity\nNov 07, 2019: 0.22b -> 0.22c nan values can pass through find_single_realization_min_cut\nNov 08, 2019: 0.22c -> 0.22d changes to resizing and centering\nNov 13, 2019: 0.22d -> 0.23 major changes to centering - chief ray in the center of oversampled image\nNov 15, 2019: 0.23 -> 0.24 change likelihood definition\nDec 16, 2019: 0.24 -> 0.24a added iluminaton with z4,z11,z22=0\nJan 14, 2020: 0.24a -> 0.24b added verbosity in find_single_realization_min_cut function\nJan 31, 2020: 0.24b -> 0.25 added support for data contaning spots from two wavelengths\nFeb 11, 2020: 0.25 -> 0.26 proper bilinear interpolation of the spots\nFeb 17, 2020: 0.26 -> 0.26a increased speed when save parameter=0\nFeb 18, 2020: 0.26a -> 0.26b mask image going through subpixel interpolation\nFeb 19, 2020: 0.26b -> 0.26c normalization of sci image takes into account mask\nMar 1, 2020: 0.26c -> 0.27 apodization scales with the size of input images\nMar 4, 2020: 0.27 -> 0.28 (re-)introduced custom size of pupil image\nMar 6, 2020: 0.28 -> 0.28b refactored cut_square function (making it much faster)\nMar 8, 2020: 0.28b -> 0.28c set 
limit in grating factor to 120000 in generating code\nApr 1, 2020: 0.28c -> 0.28d svd_invert function\nMay 6, 2020: 0.28d -> 0.28e clarified and expanded comments in postprocessing part\nJun 28, 2020: 0.28e -> 0.29 added multi analysis\nJul 02, 2020: 0.29 -> 0.30 added internal fitting for flux\nJul 02, 2020: 0.30 -> 0.30a lnlike_Neven_multi_same_spot can accept both 1d and 2d input\nJul 07, 2020: 0.30a -> 0.30b added threading time information\nJul 09, 2020: 0.30b -> 0.30c expwf_grid changed to complex64 from complex128\nJul 09, 2020: 0.30c -> 0.30d changed all float64 to float32\nJul 16, 2020: 0.30d -> 0.31 moved all fft to scipy.signal.fftconvolve\nJul 20, 2020: 0.31 -> 0.32 introduced renormalization_of_var_sum for multi_var analysis\nJul 26, 2020: 0.32 -> 0.32a only changed last value of allparameters if len()==42\nAug 10, 2020: 0.32a -> 0.33 added extra Zernike to parInit\nAug 12, 2020: 0.33 -> 0.33a changed iters to 6 in fluxfit\nSep 08, 2020: 0.33a -> 0.33b added test_run to help with debugging\nOct 05, 2020: 0.33b -> 0.33c trying to always output flux multiplier when fit_for_flux\nOct 06, 2020: 0.33c -> 0.34 added posibility to specify position of created psf\nOct 13, 2020: 0.34 -> 0.34b added finishing step of centering, done with Nelder-Mead\nOct 22, 2020: 0.34b -> 0.35 added class that does Tokovinin multi analysis\nNov 03, 2020: 0.35 -> 0.35a create parInit up to z=22, with larger parametrization\nNov 05, 2020: 0.35a -> 0.35b return same value if Tokovinin does not work\nNov 16, 2020: 0.35b -> 0.35c modified movement of parameters\nNov 17, 2020: 0.35c -> 0.35d small fixes in check_global_parameters with paramters 0 and 1\nNov 19, 2020: 0.35d -> 0.36 realized that vertical strut is different than others -\n first, simplest implementation\nNov 19, 2020: 0.36 -> 0.36a modified parInit movements for multi (mostly reduced)\nDec 05, 2020: 0.36a -> 0.37 misalignment and variable strut size\nDec 13, 2020: 0.37 -> 0.37a changed weights in multi_same_spot\nJan 17, 2021: 0.37a -> 0.37b accept True as input for simulation00\nJan 25, 2021: 0.37b -> 0.37c fixed fillCrop function in PsfPosition, slice limits need to be integers\nJan 26, 2021: 0.37c -> 0.38 PIPE2D-701, fixed width of struts implementation\nJan 28, 2021: 0.38 -> 0.39 added flux mask in chi**2 calculation\nJan 28, 2021: 0.39 -> 0.39b lowered allowed values for pixel_effect and fiber_r\nFeb 08, 2021: 0.39b -> 0.4 fixed bilinear interpolation for secondary, x and y confusion\nFeb 25, 2021: 0.4 -> 0.40a added directory for work on Tiger\nMar 05, 2021: 0.40a -> 0.41 introduced create_custom_var function\nMar 08, 2021: 0.41 -> 0.41a added suport for saving intermediate images to tiger\nMar 24, 2021: 0.41a -> 0.41b added support for masked images in find_centroid_of_flux\nMar 26, 2021: 0.41b -> 0.41c added create_custom_var function as a separate function\nMar 26, 2021: 0.41c -> 0.41d semi-implemented custom variance function in Tokovinin algorithm\nMar 26, 2021: 0.41d -> 0.41e model_multi_out has correct input parameters now\nApr 01, 2021: 0.41e -> 0.42 changed bug/feature in checking wide_43 and wide_42 parameters\nApr 02, 2021: 0.42 -> 0.43 changed width of slit shadow and slit holder shadow\nApr 04, 2021: 0.43 -> 0.44 implemented f_multiplier_factor\nApr 04, 2021: 0.44 -> 0.44a implemented possibility for using np.abs(chi) as likelihood\nApr 08, 2021: 0.44a -> 0.44b propagated change from 0.44a to Tokovinin algorithm\nApr 12, 2021: 0.44b -> 0.44c modified renormalization factors for abs(chi) value\nApr 13, 2021: 0.44c -> 0.44d 
fixed bug in the estimate of mean_value_of_background\nApr 14, 2021: 0.44d -> 0.44e mean_value_of_background estimated from sci or var data\nApr 22, 2021: 0.44e -> 0.44f introduced multi_background_factor\nApr 27, 2021: 0.44f -> 0.45 Tokovinin now works much quicker with multi_background_factor\n (create_simplified_H updated)\nApr 29, 2021: 0.45 -> 0.45a many changes in order to run create_simplified_H efficently\nMay 07, 2021: 0.45a -> 0.45b if Premodel analysis failed, return 15 values\nMay 08, 2021: 0.45b -> 0.45c changed that images of same size do not crash out_images creation\nMay 14, 2021: 0.45c -> 0.45d create_parInit, changed from <> to <= and >=\nMay 18, 2021: 0.45d -> 0.45e testing focus constrain in Tokovinin\nMay 19, 2021: 0.45e -> 0.45f expanded verbosity messages in Tokovinin algorithm\nMay 19, 2021: 0.45f -> 0.45g testing [8., 8., 8., 8., 1., 8., 8., 8., 8.] renormalization\nMay 20, 2021: 0.45g -> 0.45h do not use multi_background for image in or near focus\nMay 27, 2021: 0.45h -> 0.45i reordered variables in LN_PFS_single, in preparation for wv analysis\nMay 27, 2021: 0.45i -> 0.46 changed oversampling to be always 10\nJun 08, 2021: 0.46 -> 0.46a changed to Psf_position to be able to take only_chi and center of flux\nJun 08, 2021: 0.46a -> 0.46b changed normalization so that in focus it is indentical as in pipeline\nJun 15, 2021: 0.46b -> 0.46c change limit on the initial cut of the oversampled image,\n in order to handle bluer data\nJun 19, 2021: 0.46c -> 0.46d changed skimage.transform.resize to resize,\n to avoid skimage.transform not avaliable in LSST\nJun 20, 2021: 0.46d -> 0.46e changed scipy.signal to signal,\n and require that optPsf_cut_downsampled_scattered size is int /\n no change to unit test\nJun 24, 2021: 0.46e -> 0.47 removed resize and introduced galsim resizing in Psf_position,\n to be consistent with LSST pipeline\nJun 25, 2021: 0.47 -> 0.47a introduced galsim resizing in the first downsampling from natural resolution\n to default=10 oversampling also\nJul 11, 2021: 0.47a -> 0.47b changed a minus factor in secondary position estimation\nJul 12, 2021: 0.47b -> 0.47c inital offset in positioning had a wrong +- sign in front\nJul 23, 2021: 0.47c -> 0.47d (only) added comments and explanations\nJul 26, 2021: 0.47d -> 0.47e changed default oversampling to 11\nJul 27, 2021: 0.47e -> 0.47f offset done in galsim, but downsampling via resize function\nAug 26, 2021: 0.47f -> 0.47g direct minimization when use_center_of_flux=True\nAug 30, 2021: 0.47g -> 0.48 offset done in LSST code now\nSep 02, 2021: 0.48 -> 0.48a done cleaning offset code (PIPE2D-880)\nSep 15, 2021: 0.48a -> 0.48b removed minor bug where array_of_var_sum was called too early,\n and could fail if nan value was present\nSep 27, 2021: 0.48b -> 0.48c added explicit bool conversion to double_sources\nOct 05, 2021: 0.48c -> 0.48d further explicit bool(double_sources) covnersion in ln_pfs_single\nOct 08, 2021: 0.48d -> 0.48e Pep8 cleaning\nOct 15, 2021: 0.48e -> 0.48f forced a randomseed number in create_parInit function\nOct 25, 2021: 0.48f -> 0.49 set half of init values in create_parInit to be same as init value\nOct 26, 2021: 0.49 -> 0.49a modified create_custom_var that it does lin fit if 2nd degree fit is convex\nOct 28, 2021: 0.49a -> 0.49b modified create_custom_var so that it does not fall below min(var) value\nNov 01, 2021: 0.49b -> 0.49c create_custom_var does not change var image from step to step anymore\nNov 02, 2021: 0.49c -> 0.49d eliminated std varianble from create_simplified_H\nNov 
03, 2021: 0.49d -> 0.49e PIPE2D-930; fixed reusing list_of_variance in Tokovinin\nNov 03, 2021: 0.49e -> 0.50 PIPE2D-931; modified creation of polyfit for variance image higher up\n so it is done only once per sci/var/mask image combination\nNov 20, 2021: 0.50 -> 0.50a Hilo modifications\nDec 06, 2021: 0.50a -> 0.51 Zernike_estimation_preparation class\nDec 09, 2021: 0.51 -> 0.51a introduced `fixed_single_spot`\nFeb 11, 2022: 0.51a -> 0.51b unified index parameter allowed to vary\nMar 18, 2022: 0.51b -> 0.51c introduced div_same par, controlling how many particles are same\nMar 24, 2022: 0.51c -> 0.51d multiple small changes, for running same illum in fiber\nApr 03, 2022: 0.51d -> 0.51e test is now analysis_type_fiber == \"fixed_fiber_par\"\nMay 05, 2022: 0.51e -> 0.51f added documentation\nMay 09, 2022: 0.51f -> 0.51g replaced print with logging\nMay 24, 2022: 0.51g -> 0.51h small changes to output testing directory\nMay 26, 2022: 0.51h -> 0.51i linting fixes\nJun 01, 2022: 0.51i -> 0.52 im1.setCenter(0,0), to be compatible with galsim 2.3.4\n\n@author: Neven Caplar\n@contact: [email protected]\n@web: www.ncaplar.com\n\"\"\"\n########################################\n# standard library imports\n# from __future__ import absolute_import, division, logging.info_function\nfrom functools import partial\nfrom typing import Tuple, Iterable\n\n# import matplotlib\n# from matplotlib.colors import LogNorm\n# import matplotlib.pyplot as plt\nimport lmfit\nfrom scipy.linalg import svd\nfrom scipy import signal\nfrom scipy.ndimage.filters import gaussian_filter\nimport scipy.fftpack\nimport scipy.misc\nfrom scipy.special import erf\nfrom astropy.convolution import Gaussian2DKernel\nfrom astropy.convolution import Tophat2DKernel\nimport lsst.afw.math\nimport lsst.afw.image\nimport lsst.afw\nimport lsst\nimport galsim\nimport traceback\n# import platform\nimport threading\n# from multiprocessing import current_process\nimport numpy as np\nimport os\nimport time\n# import sys\nimport math\nimport socket\nimport sys\nimport pickle\nimport logging\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\"\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnp.set_printoptions(suppress=True)\nnp.seterr(divide='ignore', invalid='ignore')\n# logging.info(np.__config__)\n\n\n########################################\n# Related third party imports\n# none at the moment\n\n########################################\n# Local application/library specific imports\n# galsim\ngalsim.GSParams.maximum_fft_size = 12000\n\n# lsst\n\n# astropy\n# import astropy\n# import astropy.convolution\n\n# scipy\n# import scipy\n# import skimage.transform\n# import scipy.optimize as optimize\n# for svd_invert function\n\n# lmfit\n\n# matplotlib\n\n# needed for resizing routines\n\n# for distributing image creation in Tokovinin algorithm\n########################################\n\n__all__ = [\n 'PupilFactory',\n 'Pupil',\n 'ZernikeFitterPFS',\n 'LN_PFS_multi_same_spot',\n 'LN_PFS_single',\n 'LNP_PFS',\n 'find_centroid_of_flux',\n 'create_parInit',\n 'PFSPupilFactory',\n 'custom_fftconvolve',\n 'stepK',\n 'maxK',\n 'sky_scale',\n 'sky_size',\n 'remove_pupil_parameters_from_all_parameters',\n 'resize',\n '_interval_overlap',\n 'svd_invert',\n 'Tokovinin_multi',\n 'find_centroid_of_flux',\n 'create_popt_for_custom_var',\n 'create_custom_var_from_popt',\n 'Zernike_estimation_preparation']\n\n__version__ = \"0.52\"\n\n# classes Pupil, PupilFactory and PFSPupilFactory have different form of documentation,\n# compared to 
other classes as they have been imported from code written by Josh Meyers\n\n\nclass Pupil(object):\n \"\"\"!Pupil obscuration function.\n \"\"\"\n\n def __init__(self, illuminated, size, scale):\n \"\"\"!Construct a Pupil\n\n @param[in] illuminated 2D numpy array indicating which parts of\n the pupil plane are illuminated.\n @param[in] size Size of pupil plane array in meters. Note\n that this may be larger than the actual\n diameter of the illuminated pupil to\n accommodate zero-padding.\n @param[in] scale Sampling interval of pupil plane array in\n meters.\n \"\"\"\n self.illuminated = illuminated\n self.size = size\n self.scale = scale\n\n\nclass PupilFactory(object):\n \"\"\"!Pupil obscuration function factory for use with Fourier optics.\n\n Based on the code by Josh Meyers, developed for HSC camera\n Contains functions that can create various obscurations in the camera\n \"\"\"\n\n def __init__(\n self,\n pupilSize,\n npix,\n input_angle,\n detFrac,\n strutFrac,\n slitFrac,\n slitFrac_dy,\n x_fiber,\n y_fiber,\n effective_ilum_radius,\n frd_sigma,\n frd_lorentz_factor,\n det_vert,\n wide_0=0,\n wide_23=0,\n wide_43=0,\n misalign=0,\n verbosity=0):\n \"\"\"Construct a PupilFactory.\n Parameters\n ----------\n pupilSize: `float`\n Size of the exit pupil [m]\n npix: `int`\n Constructed Pupils will be npix x npix\n input_angle: `float`\n Angle of the pupil (for all practical purposes fixed an np.pi/2)\n detFrac: `float`\n Value determining how much of the exit pupil obscured by the\n central obscuration(detector)\n strutFrac: `float`\n Value determining how much of the exit pupil is obscured\n by a single strut\n slitFrac: `float`\n Value determining how much of the exit pupil is obscured by slit\n slitFrac_dy: `float`\n Value determining what is the vertical position of the slit\n in the exit pupil\n x_fiber: `float`\n Position of the fiber misaligment in the x direction\n y_fiber: `float`\n Position of the fiber misaligment in the y direction\n effective_ilum_radius: `float`\n Fraction of the maximal radius of the illumination\n of the exit pupil that is actually illuminated\n frd_sigma: `float`\n Sigma of Gaussian convolving only outer edge, mimicking FRD\n frd_lorentz_factor: `float`\n Strength of the lorentzian factor describing wings\n det_vert: `float`\n Multiplicative factor determining vertical size\n of the detector obscuration\n wide_0: `float`\n Widening of the strut at 0 degrees\n wide_23: `float`\n Widening of the strut at the top-left corner\n wide_43: `float`\n Widening of the strut at the bottom-left corner\n misalign: `float`\n Describing the amount of misaligment\n verbosity: `int`\n How verbose during evaluation (1 = full verbosity)\n \"\"\"\n self.verbosity = verbosity\n if self.verbosity == 1:\n logging.info('Entering PupilFactory class')\n logging.info('Entering PupilFactory class')\n\n self.pupilSize = pupilSize\n self.npix = npix\n self.input_angle = input_angle\n self.detFrac = detFrac\n self.strutFrac = strutFrac\n self.pupilScale = pupilSize / npix\n self.slitFrac = slitFrac\n self.slitFrac_dy = slitFrac_dy\n self.effective_ilum_radius = effective_ilum_radius\n self.frd_sigma = frd_sigma\n self.frd_lorentz_factor = frd_lorentz_factor\n self.det_vert = det_vert\n\n self.wide_0 = wide_0\n self.wide_23 = wide_23\n self.wide_43 = wide_43\n self.misalign = misalign\n\n u = (np.arange(npix, dtype=np.float32) - (npix - 1) / 2) * self.pupilScale\n self.u, self.v = np.meshgrid(u, u)\n\n @staticmethod\n def _pointLineDistance(p0, p1, p2):\n \"\"\"Compute the 
right-angle distance between the points given by `p0`\n and the line that passes through `p1` and `p2`.\n\n @param[in] p0 2-tuple of numpy arrays (x,y coords)\n @param[in] p1 2-tuple of scalars (x,y coords)\n @param[in] p2 2-tuple of scalars (x,y coords)\n @returns numpy array of distances; shape congruent to p0[0]\n \"\"\"\n x0, y0 = p0\n x1, y1 = p1\n x2, y2 = p2\n dy21 = y2 - y1\n dx21 = x2 - x1\n return np.abs(dy21 * x0 - dx21 * y0 + x2 * y1 - y2 * x1) / np.hypot(dy21, dx21)\n\n def _fullPupil(self):\n \"\"\"Make a fully-illuminated Pupil.\n\n @returns Pupil\n \"\"\"\n illuminated = np.ones(self.u.shape, dtype=np.float32)\n return Pupil(illuminated, self.pupilSize, self.pupilScale)\n\n def _cutCircleInterior(self, pupil, p0, r):\n \"\"\"Cut out the interior of a circular region from a Pupil.\n\n @param[in,out] pupil Pupil to modify in place\n @param[in] p0 2-tuple indicating region center\n @param[in] r Circular region radius\n \"\"\"\n r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2\n pupil.illuminated[r2 < r**2] = False\n\n def _cutCircleExterior(self, pupil, p0, r):\n \"\"\"Cut out the exterior of a circular region from a Pupil.\n\n @param[in,out] pupil Pupil to modify in place\n @param[in] p0 2-tuple indicating region center\n @param[in] r Circular region radius\n \"\"\"\n r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2\n pupil.illuminated[r2 > r**2] = False\n\n def _cutEllipseExterior(self, pupil, p0, r, b, thetarot):\n \"\"\"Cut out the exterior of a circular region from a Pupil.\n\n @param[in,out] pupil Pupil to modify in place\n @param[in] p0 2-tuple indicating region center\n @param[in] r Ellipse region radius = major axis\n @param[in] b Ellipse region radius = minor axis\n @param[in] thetarot Ellipse region rotation\n \"\"\"\n\n r2 = (self.u - p0[0])**2 + (self.v - p0[1])**2\n theta = np.arctan(self.u / self.v) + thetarot\n\n pupil.illuminated[r2 > r**2 * b**2 / (b**2 * (np.cos(theta))**2 + r**2 * (np.sin(theta))**2)] = False\n\n def _cutSquare(self, pupil, p0, r, angle, det_vert):\n \"\"\"Cut out the interior of a circular region from a Pupil.\n\n @param[in,out] pupil Pupil to modify in place\n @param[in] p0 2-tuple indicating region center\n @param[in] r half lenght of the length of square side\n @param[in] angle angle that the camera is rotated\n @param[in] det_vert multiplicative factor that distorts the square into a rectangle\n \"\"\"\n pupil_illuminated_only1 = np.ones_like(pupil.illuminated, dtype=np.float32)\n\n time_start_single_square = time.time()\n\n ###########################################################\n # Central square\n if det_vert is None:\n det_vert = 1\n\n x21 = -r / 2 * det_vert * 1\n x22 = +r / 2 * det_vert * 1\n y21 = -r / 2 * 1\n y22 = +r / 2 * 1\n i_max = self.npix / 2 - 0.5\n i_min = -i_max\n\n i_y_max = int(np.round((x22 + p0[1]) / self.pupilScale - (i_min)))\n i_y_min = int(np.round((x21 + p0[1]) / self.pupilScale - (i_min)))\n i_x_max = int(np.round((y22 + p0[0]) / self.pupilScale - (i_min)))\n i_x_min = int(np.round((y21 + p0[0]) / self.pupilScale - (i_min)))\n\n assert angle == np.pi / 2\n # angleRad = angle\n\n camX_value_for_f_multiplier = p0[0]\n camY_value_for_f_multiplier = p0[1]\n\n # logging.info(camX_value_for_f_multiplier,camY_value_for_f_multiplier)\n camY_Max = 0.02\n f_multiplier_factor = (-camX_value_for_f_multiplier * 100 / 3) * \\\n (np.abs(camY_value_for_f_multiplier) / camY_Max) + 1\n # f_multiplier_factor=1\n if self.verbosity == 1:\n logging.info('f_multiplier_factor for size of detector triangle is: ' + 
str(f_multiplier_factor))\n\n pupil_illuminated_only0_in_only1 = np.zeros((i_y_max - i_y_min, i_x_max - i_x_min))\n\n u0 = self.u[i_y_min:i_y_max, i_x_min:i_x_max]\n v0 = self.v[i_y_min:i_y_max, i_x_min:i_x_max]\n\n # factor that is controling how big is the triangle in the corner of the detector?\n f = 0.2\n f_multiplier = f_multiplier_factor / 1\n\n ###########################################################\n # Lower right corner\n x21 = -r / 2\n x22 = +r / 2\n y21 = -r / 2 * det_vert\n y22 = +r / 2 * det_vert\n\n f_lr = np.copy(f) * (1 / f_multiplier)\n\n angleRad21 = -np.pi / 4\n triangle21 = [[p0[0] + x22, p0[1] + y21],\n [p0[0] + x22, p0[1] + y21 - y21 * f_lr],\n [p0[0] + x22 - x22 * f_lr, p0[1] + y21]]\n\n p21 = triangle21[0]\n y22 = (triangle21[1][1] - triangle21[0][1]) / np.sqrt(2)\n y21 = 0\n x21 = (triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)\n x22 = -(triangle21[2][0] - triangle21[0][0]) / np.sqrt(2)\n\n pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21)\n - (u0 - p21[0]) * np.sin(-angleRad21) < y22)] = True\n\n ###########################################################\n # Upper left corner\n x21 = -r / 2 * 1\n x22 = +r / 2 * 1\n y21 = -r / 2 * det_vert\n y22 = +r / 2 * det_vert\n # angleRad12 = -np.pi / 4\n f_ul = np.copy(f) * (1 / f_multiplier)\n\n triangle12 = [[p0[0] + x21, p0[1] + y22],\n [p0[0] + x21, p0[1] + y22 - y22 * f_ul],\n [p0[0] + x21 - x21 * f_ul, p0[1] + y22]]\n\n p21 = triangle12[0]\n y22 = 0\n y21 = (triangle12[1][1] - triangle12[0][1]) / np.sqrt(2)\n x21 = -(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)\n x22 = +(triangle12[2][0] - triangle12[0][0]) / np.sqrt(2)\n\n pupil_illuminated_only0_in_only1[((v0 - p21[1]) * np.cos(-angleRad21)\n - (u0 - p21[0]) * np.sin(-angleRad21) > y21)] = True\n\n ###########################################################\n # Upper right corner\n x21 = -r / 2 * 1\n x22 = +r / 2 * 1\n y21 = -r / 2 * det_vert\n y22 = +r / 2 * det_vert\n f_ur = np.copy(f) * f_multiplier\n\n triangle22 = [[p0[0] + x22, p0[1] + y22],\n [p0[0] + x22, p0[1] + y22 - y22 * f_ur],\n [p0[0] + x22 - x22 * f_ur, p0[1] + y22]]\n\n p21 = triangle22[0]\n y22 = -0\n y21 = +(triangle22[1][1] - triangle22[0][1]) / np.sqrt(2)\n x21 = +(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)\n x22 = -(triangle22[2][0] - triangle22[0][0]) / np.sqrt(2)\n\n pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21)\n + (v0 - p21[1]) * np.sin(-angleRad21) > x21)] = True\n\n ###########################################################\n # Lower left corner\n x21 = -r / 2 * 1\n x22 = +r / 2 * 1\n y21 = -r / 2 * det_vert\n y22 = +r / 2 * det_vert\n f_ll = np.copy(f) * f_multiplier\n\n triangle11 = [[p0[0] + x21, p0[1] + y21],\n [p0[0] + x21, p0[1] + y21 - y21 * f_ll],\n [p0[0] + x21 - x21 * f_ll, p0[1] + y21]]\n\n p21 = triangle11[0]\n y22 = -(triangle11[1][1] - triangle11[0][1]) / np.sqrt(2)\n y21 = 0\n x21 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)\n x22 = +(triangle11[2][0] - triangle11[0][0]) / np.sqrt(2)\n\n pupil_illuminated_only0_in_only1[((u0 - p21[0]) * np.cos(-angleRad21)\n + (v0 - p21[1]) * np.sin(-angleRad21) < x22)] = True\n\n pupil_illuminated_only1[i_y_min:i_y_max, i_x_min:i_x_max] = pupil_illuminated_only0_in_only1\n\n pupil.illuminated = pupil.illuminated * pupil_illuminated_only1\n time_end_single_square = time.time()\n\n if self.verbosity == 1:\n logging.info('Time for cutting out the square is '\n + str(time_end_single_square - time_start_single_square))\n\n def _cutRay(self, pupil, p0, angle, thickness, 
angleunit=None, wide=0):\n \"\"\"Cut out a ray from a Pupil.\n\n @param[in,out] pupil Pupil to modify in place\n @param[in] p0 2-tuple indicating ray starting point\n @param[in] angle Ray angle measured CCW from +x.\n @param[in] thickness Thickness of cutout\n @param[in] angleunit If None, changes internal units to radians\n @param[in] wide Controls the widening of the strut as\n a function of the distance from the origin\n\n\n \"\"\"\n if angleunit is None:\n angleRad = angle.asRadians()\n else:\n angleRad = angle\n # the 1 is arbitrary, just need something to define another point on\n # the line\n\n p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))\n d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)\n\n radial_distance = 14.34 * np.sqrt((self.u - p0[0])**2 + (self.v - p0[1])**2)\n\n pupil.illuminated[(d < 0.5 * thickness * (1 + wide * radial_distance))\n & ((self.u - p0[0]) * np.cos(angleRad)\n + (self.v - p0[1]) * np.sin(angleRad) >= 0)] = False\n\n def _addRay(self, pupil, p0, angle, thickness, angleunit=None):\n \"\"\"Add a ray from a Pupil.\n\n @param[in,out] pupil Pupil to modify in place\n @param[in] p0 2-tuple indicating ray starting point\n @param[in] angle Ray angle measured CCW from +x.\n @param[in] thickness Thickness of cutout\n \"\"\"\n if angleunit is None:\n angleRad = angle.asRadians()\n else:\n angleRad = angle\n # the 1 is arbitrary, just need something to define another point on\n # the line\n p1 = (p0[0] + 1, p0[1] + np.tan(angleRad))\n d = PupilFactory._pointLineDistance((self.u, self.v), p0, p1)\n pupil.illuminated[(d < 0.5 * thickness)\n & ((self.u - p0[0]) * np.cos(angleRad)\n + (self.v - p0[1]) * np.sin(angleRad) >= 0)] = True\n\n\nclass PFSPupilFactory(PupilFactory):\n \"\"\"Pupil obscuration function factory for PFS\n\n Based on the code by Josh Meyers, initially developed for HSC camera\n Invokes PupilFactory to create obscurations of the camera\n Adds various illumination effects which are specified to the spectrographs\n \"\"\"\n\n def __init__(\n self,\n pupilSize,\n npix,\n input_angle,\n detFrac,\n strutFrac,\n slitFrac,\n slitFrac_dy,\n x_fiber,\n y_fiber,\n effective_ilum_radius,\n frd_sigma,\n frd_lorentz_factor,\n det_vert,\n slitHolder_frac_dx,\n wide_0=0,\n wide_23=0,\n wide_43=0,\n misalign=0,\n verbosity=0):\n \"\"\"!Construct a PupilFactory.\n\n\n Parameters\n ----------\n pupilSize: `float`\n Size of the exit pupil [m]\n npix: `int`\n Constructed Pupils will be npix x npix\n input_angle: `float`\n Angle of the pupil (for all practical purposes fixed an np.pi/2)\n detFrac: `float`\n Value determining how much of the exit pupil obscured by the\n central obscuration(detector)\n strutFrac: `float`\n Value determining how much of the exit pupil is obscured\n by a single strut\n slitFrac: `float`\n Value determining how much of the exit pupil is obscured by slit\n slitFrac_dy: `float`\n Value determining what is the vertical position of the slit\n in the exit pupil\n x_fiber: `float`\n Position of the fiber misaligment in the x direction\n y_fiber: `float`\n Position of the fiber misaligment in the y direction\n effective_ilum_radius: `float`\n Fraction of the maximal radius of the illumination\n of the exit pupil that is actually illuminated\n frd_sigma: `float`\n Sigma of Gaussian convolving only outer edge, mimicking FRD\n frd_lorentz_factor: `float`\n Strength of the lorentzian factor describing wings\n det_vert: `float`\n Multiplicative factor determining vertical size\n of the detector obscuration\n wide_0: `float`\n Widening of the strut 
at 0 degrees\n wide_23: `float`\n Widening of the strut at the top-left corner\n wide_43: `float`\n Widening of the strut at the bottom-left corner\n misalign: `float`\n Describing the amount of misaligment\n verbosity: `int`\n How verbose during evaluation (1 = full verbosity)\n \"\"\"\n self.verbosity = verbosity\n if self.verbosity == 1:\n logging.info('Entering PFSPupilFactory class')\n\n PupilFactory.__init__(\n self,\n pupilSize,\n npix,\n input_angle,\n detFrac,\n strutFrac,\n slitFrac,\n slitFrac_dy,\n x_fiber,\n y_fiber,\n effective_ilum_radius,\n frd_sigma,\n frd_lorentz_factor,\n det_vert,\n verbosity=self.verbosity,\n wide_0=wide_0,\n wide_23=wide_23,\n wide_43=wide_43,\n misalign=misalign)\n\n self.x_fiber = x_fiber\n self.y_fiber = y_fiber\n self.slitHolder_frac_dx = slitHolder_frac_dx\n self._spiderStartPos = [np.array([0., 0.]), np.array([0., 0.]), np.array([0., 0.])]\n self._spiderAngles = [0, np.pi * 2 / 3, np.pi * 4 / 3]\n self.effective_ilum_radius = effective_ilum_radius\n\n self.wide_0 = wide_0\n self.wide_23 = wide_23\n self.wide_43 = wide_43\n self.misalign = misalign\n\n def getPupil(self, point):\n \"\"\"!Calculate a Pupil at a given point in the focal plane.\n\n @param point Point2D indicating focal plane coordinates.\n @returns Pupil\n \"\"\"\n if self.verbosity == 1:\n logging.info('Entering getPupil (function inside PFSPupilFactory)')\n\n # called subaruRadius as it was taken from the code fitting pupil for HSC on Subaru\n subaruRadius = (self.pupilSize / 2) * 1\n\n detFrac = self.detFrac # linear fraction\n hscRadius = detFrac * subaruRadius\n slitFrac = self.slitFrac # linear fraction\n subaruSlit = slitFrac * subaruRadius\n strutFrac = self.strutFrac # linear fraction\n subaruStrutThick = strutFrac * subaruRadius\n\n # y-position of the slit\n slitFrac_dy = self.slitFrac_dy\n\n # relic from the HSC code\n # See DM-8589 for more detailed description of following parameters\n # d(lensCenter)/d(theta) in meters per degree\n # lensRate = 0.0276 * 3600 / 128.9 * subaruRadius\n # d(cameraCenter)/d(theta) in meters per degree\n hscRate = 2.62 / 1000 * subaruRadius\n hscPlateScale = 380\n thetaX = point[0] * hscPlateScale\n thetaY = point[1] * hscPlateScale\n\n pupil = self._fullPupil()\n\n camX = thetaX * hscRate\n camY = thetaY * hscRate\n\n # creating FRD effects\n single_element = np.linspace(-1, 1, len(pupil.illuminated), endpoint=True, dtype=np.float32)\n u_manual = np.tile(single_element, (len(single_element), 1))\n v_manual = np.transpose(u_manual)\n center_distance = np.sqrt((u_manual - self.x_fiber * hscRate * hscPlateScale * 12)\n ** 2 + (v_manual - self.y_fiber * hscRate * hscPlateScale * 12)**2)\n frd_sigma = self.frd_sigma\n sigma = 2 * frd_sigma\n\n pupil_frd = (1 / 2 * (scipy.special.erf((-center_distance + self.effective_ilum_radius) / sigma)\n + scipy.special.erf((center_distance + self.effective_ilum_radius) / sigma)))\n\n ################\n # Adding misaligment in this section\n time_misalign_start = time.time()\n\n position_of_center_0 = np.where(center_distance == np.min(center_distance))\n position_of_center = [position_of_center_0[1][0], position_of_center_0[0][0]]\n\n position_of_center_0_x = position_of_center_0[0][0]\n position_of_center_0_y = position_of_center_0[1][0]\n\n distances_to_corners = np.array([np.sqrt(position_of_center[0]**2 + position_of_center[1]**2),\n np.sqrt((len(pupil_frd) - position_of_center[0])**2\n + position_of_center[1]**2),\n np.sqrt((position_of_center[0])**2\n + (len(pupil_frd) - position_of_center[1])**2),\n 
np.sqrt((len(pupil_frd) - position_of_center[0])**2\n + (len(pupil_frd) - position_of_center[1])**2)])\n\n max_distance_to_corner = np.max(distances_to_corners)\n threshold_value = 0.5\n left_from_center = np.where(pupil_frd[position_of_center_0_x]\n [0:position_of_center_0_y] < threshold_value)[0]\n right_from_center = \\\n np.where(pupil_frd[position_of_center_0_x][position_of_center_0_y:] < threshold_value)[0] +\\\n position_of_center_0_y\n\n up_from_center = \\\n np.where(pupil_frd[:, position_of_center_0_y][position_of_center_0_x:] < threshold_value)[0] +\\\n position_of_center_0_x\n down_from_center = np.where(pupil_frd[:, position_of_center_0_y]\n [:position_of_center_0_x] < threshold_value)[0]\n\n if len(left_from_center) > 0:\n size_of_05_left = position_of_center_0_y - np.max(left_from_center)\n else:\n size_of_05_left = 0\n\n if len(right_from_center) > 0:\n size_of_05_right = np.min(right_from_center) - position_of_center_0_y\n else:\n size_of_05_right = 0\n\n if len(up_from_center) > 0:\n size_of_05_up = np.min(up_from_center) - position_of_center_0_x\n else:\n size_of_05_up = 0\n\n if len(down_from_center) > 0:\n size_of_05_down = position_of_center_0_x - np.max(down_from_center)\n else:\n size_of_05_down = 0\n\n sizes_4_directions = np.array([size_of_05_left, size_of_05_right, size_of_05_up, size_of_05_down])\n max_size = np.max(sizes_4_directions)\n imageradius = max_size\n\n radiusvalues = np.linspace(\n 0, int(\n np.ceil(max_distance_to_corner)), int(\n np.ceil(max_distance_to_corner)) + 1)\n\n sigtotp = sigma * 550\n\n dif_due_to_mis_class = Pupil_misalign(radiusvalues, imageradius, sigtotp, self.misalign)\n dif_due_to_mis = dif_due_to_mis_class()\n\n scaling_factor_pixel_to_physical = max_distance_to_corner / np.max(center_distance)\n distance_int = np.round(center_distance * scaling_factor_pixel_to_physical).astype(int)\n\n pupil_frd_with_mis = pupil_frd + dif_due_to_mis[distance_int]\n pupil_frd_with_mis[pupil_frd_with_mis > 1] = 1\n\n time_misalign_end = time.time()\n\n if self.verbosity == 1:\n logging.info('Time to execute illumination considerations due to misalignment '\n + str(time_misalign_end - time_misalign_start))\n\n ####\n pupil_lorentz = (np.arctan(2 * (self.effective_ilum_radius - center_distance) / (4 * sigma))\n + np.arctan(2 * (self.effective_ilum_radius + center_distance) / (4 * sigma))) /\\\n (2 * np.arctan((2 * self.effective_ilum_radius) / (4 * sigma)))\n\n pupil_frd = np.copy(pupil_frd_with_mis)\n pupil.illuminated = (pupil_frd + 1 * self.frd_lorentz_factor\n * pupil_lorentz) / (1 + self.frd_lorentz_factor)\n\n # Cout out the acceptance angle of the camera\n self._cutCircleExterior(pupil, (0.0, 0.0), subaruRadius)\n\n # Cut out detector shadow\n self._cutSquare(pupil, (camX, camY), hscRadius, self.input_angle, self.det_vert)\n\n # No vignetting of this kind for the spectroscopic camera\n # self._cutCircleExterior(pupil, (lensX, lensY), lensRadius)\n\n # Cut out spider shadow\n for pos, angle in zip(self._spiderStartPos, self._spiderAngles):\n x = pos[0] + camX\n y = pos[1] + camY\n\n if angle == 0:\n # logging.info('cutRay applied to strut at angle '+str(angle))\n self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_0)\n if angle == np.pi * 2 / 3:\n # logging.info('cutRay applied to strut at angle '+str(angle))\n self._cutRay(pupil, (x, y), angle, subaruStrutThick, 'rad', self.wide_23)\n if angle == np.pi * 4 / 3:\n # logging.info('cutRay applied to strut at angle '+str(angle))\n self._cutRay(pupil, (x, y), angle, 
subaruStrutThick, 'rad', self.wide_43)\n\n # cut out slit shadow\n self._cutRay(pupil, (2, slitFrac_dy / 18), -np.pi, subaruSlit * 1.05, 'rad')\n\n # cut out slit holder shadow\n # subaruSlit/3 is roughly the width of the holder\n self._cutRay(pupil, (self.slitHolder_frac_dx / 18, 1), -np.pi / 2, subaruSlit * 0.3, 'rad')\n\n if self.verbosity == 1:\n logging.info('Finished with getPupil')\n\n return pupil\n\n\nclass Pupil_misalign(object):\n \"\"\"Apply misaligment correction to the illumination of the pupil\n\n Developed by Brent Belland (Caltech)\n Copied here without modifications\n \"\"\"\n\n def __init__(self, radiusvalues, imageradius, sigtotp, misalign):\n\n self.radiusvalues = radiusvalues\n self.imageradius = imageradius\n self.sigtotp = sigtotp\n self.misalign = misalign\n\n def wapp(self, A):\n # Approximation function by Jim Gunn to approximate and correct for the\n # widening of width due to the angular misalignment convolution. This\n # is used to basically scale the contribution of angular misalignment and FRD\n # A = angmis/sigFRD\n wappA = np.sqrt(1 + A * A * (1 + A * A) / (2 + 1.5 * A * A))\n return wappA\n\n def fcorr(self, x, A):\n # The function scaled so that it keeps the same (approximate) width value\n # after angular convolution\n correctedfam = self.fcon(x * self.wapp(A), A)\n return correctedfam\n\n def fcon(self, x, A):\n # For more detail about this method, see \"Analyzing Radial Profiles for FRD\n # and Angular Misalignment\", by Jim Gunn, 16/06/13.\n wt = [0.1864, 0.1469, 0.1134, 0.1066, 0.1134, 0.1469, 0.1864] # from Jim Gunn's white paper,\n # wt contains the normalized integrals under the angular misalignment\n # convolution kernel, i.e., C(1-(x/angmisp)^2)^{-1/2} for |x|<angmisp and 0\n # elsewhere. Note that the edges' centers are at +/- a, so they are\n # integrated over an effective half of the length of the others.\n temp = np.zeros(np.size(x))\n for index in range(7):\n temp = temp + wt[index] * self.ndfc(x + (index - 3) / 3 * A)\n angconvolved = temp\n return angconvolved\n\n def ndfc(self, x):\n # Standard model dropoff from a Gaussian convolution, normalized to brightness 1,\n # radius (rh) 0, and sigTOT 1\n # logging.info(len(x))\n ndfcfun = 1 - (0.5 * erf(x / np.sqrt(2)) + 0.5)\n return ndfcfun\n\n def FA(self, r, rh, sigTOT, A):\n # Function that takes all significant variables of the dropoff and\n # normalizes the curve to be comparable to ndfc\n # r = vector of radius values, in steps of pixels\n # rh = radius of half-intensity. Effectively the size of the radius of the dropoff\n # sigTOT = total width of the convolution kernel that recreates the width of the dropoff\n # between 85% and 15% illumination. Effectively just think of this as sigma\n # A = angmis/sigFRD, that is, the ratio between the angular misalignment\n # and the sigma due to only FRD. Usually this is on the order of 1-3.\n FitwithAngle = self.fcorr((r - rh) / sigTOT, A)\n return FitwithAngle\n\n def __call__(self):\n\n no_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, 0)\n with_mis = self.FA(self.radiusvalues, self.imageradius, self.sigtotp, self.misalign)\n dif_due_to_mis = with_mis - no_mis\n\n return dif_due_to_mis\n\n\nclass ZernikeFitterPFS(object):\n\n \"\"\"Create a model images for PFS\n\n Despite its name, it does not actually ``fits'' the paramters describing the donuts,\n it ``just'' creates the images\n\n The final image is made by the convolution of\n 1. an OpticalPSF (constructed using FFT)\n 2. an input fiber image\n 3. 
and other convolutions such as CCD charge diffusion\n\n The OpticalPSF part includes\n 1.1. description of pupil\n 1.2. specification of an arbitrary number of zernike wavefront aberrations\n\n This code uses lmfit to initalize the parameters.\n\n Calls Psf_position\n Calls Pupil classes (which ones?)\n\n Called by LN_PFS_Single (function constructModelImage_PFS_naturalResolution)\n \"\"\"\n\n def __init__(self, image=np.ones((20, 20)), image_var=np.ones((20, 20)),\n image_mask=None, pixelScale=20.76, wavelength=794,\n diam_sic=139.5327e-3, npix=1536, pupilExplicit=None,\n wf_full_Image=None,\n ilum_Image=None, dithering=1, save=None,\n pupil_parameters=None, use_pupil_parameters=None, use_optPSF=None, use_wf_grid=None,\n zmaxInit=None, extraZernike=None, simulation_00=None, verbosity=None,\n double_sources=None, double_sources_positions_ratios=None, test_run=None,\n explicit_psf_position=None, use_only_chi=False, use_center_of_flux=False,\n PSF_DIRECTORY=None, *args):\n \"\"\"\n Parameters\n ----------\n image: `np.array`, (N, N)\n image that you wish to model\n if you do not pass the image that you wish to compare,\n the algorithm will default to creating 20x20 image that has\n value of '1' everywhere\n image_var: `np.array`, (N, N)\n variance image\n if you do not pass the variance image,\n the algorithm will default to creating 20x20 image that has\n value of '1' everywhere\n image_mask: `np.array`, (N, N)\n mask image\n pixelScale: `float`\n pixel scale in arcseconds\n This is size of the pixel in arcsec for PFS red arm in focus\n calculated with http://www.wilmslowastro.com/software/formulae.htm\n pixel size in microns/focal length in mm x 206.3\n pixel size = 15 microns, focal length = 149.2 mm\n (138 aperature x 1.1 f number)\n wavelength: `float`\n wavelength of the psf [nm]\n if you do not pass the value for wavelength it will default to 794 nm,\n which is roughly in the middle of the red detector\n diam_sic: `float`\n size of the exit pupil [m]\n Exit pupil size in focus, default is 139.5237e-3 meters\n (taken from Zemax)\n npix: `int`\n size of 2d array contaning exit pupil illumination\n pupilExplicit: `np.array`, (Np, Np)\n if avaliable, uses this image for pupil instead of\n creating it from supplied parameters\n wf_full_Image: `np.array`, (Np, Np)\n wavefront image\n if avaliable, uses this image for wavefront instead of\n creating it from supplied parameters\n dithering: `int`\n dithering scale (most likely 1 or 2)\n save: `int`\n if 1, save various intermediate results, for testing purposes\n needs to set up also PSF_DIRECTORY\n use_optPSF: `np.array`, (Np, Np)\n if provided skip creation of optical psf, only do postprocessing\n use_wf_grid: `np.array`, (Ny, Nx)\n if provided, use this explicit wavefront map\n zmaxInit: `int`\n highest Zernike order (11 or 22)\n extraZernike: `np.array`, (N)\n if provided, simulated Zernike orders higher than 22\n simulation_00: `np.array`, (2,)\n places optical center at the center of the final image\n verbosity: `int`\n verbosity during evaluations\n double_sources:\n is there a second source present in the image\n double_sources_positions_ratios: `np.arrray`, (2,)\n initial guess for the position and strength of the second source\n explicit_psf_position: `np.array`, (2,)\n explicit position where to place optical psf\n use_only_chi: `bool`\n if True, fit to minimize np.abs(chi), and not chi**2\n use_center_of_flux: `bool`\n if True, fit to minimize the distance between the center of flux\n for the model and the input image\n 
PSF_DIRECTORY: `str`\n where will intermediate outputs be saved for testing purposes\n Notes\n ----------\n Creates a model image that is fitted to the input sicence image\n The model image is made by the convolution of\n 1. an OpticalPSF (constructed using FFT)\n created with _getOptPsf_naturalResolution\n The OpticalPSF part includes\n 1.1. description of pupil\n created with get_Pupil\n 1.2. specification of an arbitrary number of\n zernike wavefront aberrations,\n which are input to galsim.phase_screens.OpticalScreen\n 2. an input fiber image and other convolutions such as\n CCD charge diffusion created with _optPsf_postprocessing\n This code uses lmfit to initalize the parameters.\n Calls class PsfPosition\n Calls class PFSPupilFactory\n\n Examples\n ----------\n Simple exampe with initial parameters, changing only one parameter\n >>> zmax = 22\n >>> single_image_analysis = ZernikeFitterPFS(zmaxInit = zmax,\n verbosity=1)\n >>> single_image_analysis.initParams()\n >>> single_image_analysis.params['detFrac'] =\\\n lmfit.Parameter(name='detFrac', value=0.70)\n >>> resulting_image, psf_pos =\\\n single_image_analysis.constructModelImage_PFS_naturalResolution()\n \"\"\"\n\n self.image = image\n self.image_var = image_var\n if image_mask is None:\n image_mask = np.zeros(image.shape)\n self.image_mask = image_mask\n self.wavelength = wavelength\n self.diam_sic = diam_sic\n self.npix = npix\n self.dithering = dithering\n self.pixelScale = pixelScale\n self.pixelScale_effective = self.pixelScale / dithering\n\n if save in (None, 0):\n save = None\n else:\n save = 1\n self.save = save\n self.use_optPSF = use_optPSF\n\n # puilExplicit can be used to pass explicitly the image of the pupil\n # instead of creating it from the supplied parameters\n if pupilExplicit is None:\n pupilExplicit is False\n self.pupilExplicit = pupilExplicit\n\n if pupil_parameters is None:\n self.pupil_parameters = pupil_parameters\n else:\n self.pupil_parameters = pupil_parameters\n\n if use_pupil_parameters is None:\n self.use_pupil_parameters = use_pupil_parameters\n else:\n self.use_pupil_parameters = use_pupil_parameters\n self.args = args\n\n self.use_wf_grid = use_wf_grid\n self.zmax = zmaxInit\n\n self.simulation_00 = simulation_00\n if self.simulation_00:\n self.simulation_00 = 1\n\n self.extraZernike = extraZernike\n self.verbosity = verbosity\n self.double_sources = double_sources\n self.double_sources_positions_ratios = double_sources_positions_ratios\n\n self.test_run = test_run\n\n self.explicit_psf_position = explicit_psf_position\n self.use_only_chi = use_only_chi\n self.use_center_of_flux = use_center_of_flux\n self.flux = float(np.sum(image))\n\n try:\n if not explicit_psf_position:\n self.explicit_psf_position = None\n except BaseException:\n pass\n\n self.PSF_DIRECTORY = PSF_DIRECTORY\n ############################################################\n if self.PSF_DIRECTORY is None:\n # names of default directories where I often work\n if socket.gethostname() == 'IapetusUSA':\n self.PSF_DIRECTORY = '/Volumes/Saturn_USA/PFS/'\n elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \\\n socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':\n self.PSF_DIRECTORY = '/work/ncaplar/'\n else:\n self.PSF_DIRECTORY = '/tigress/ncaplar/PFS/'\n\n if self.PSF_DIRECTORY is not None:\n self.TESTING_FOLDER = self.PSF_DIRECTORY + 'Testing/'\n self.TESTING_PUPIL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Pupil_Images/'\n self.TESTING_WAVEFRONT_IMAGES_FOLDER = self.TESTING_FOLDER + 'Wavefront_Images/'\n 
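            # intermediate and final model images are saved in this sub-folder when save == 1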
self.TESTING_FINAL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Final_Images/'\n\n if self.verbosity == 1:\n # check the versions of the most important libraries\n logging.info('np.__version__' + str(np.__version__))\n logging.info('scipy.__version__' + str(scipy.__version__))\n\n def initParams(\n self,\n z4Init=None,\n detFracInit=None,\n strutFracInit=None,\n focalPlanePositionInit=None,\n slitFracInit=None,\n slitFrac_dy_Init=None,\n wide_0Init=None,\n wide_23Init=None,\n wide_43Init=None,\n radiometricEffectInit=None,\n radiometricExponentInit=None,\n x_ilumInit=None,\n y_ilumInit=None,\n pixel_effectInit=None,\n backgroundInit=None,\n x_fiberInit=None,\n y_fiberInit=None,\n effective_ilum_radiusInit=None,\n frd_sigmaInit=None,\n frd_lorentz_factorInit=None,\n misalignInit=None,\n det_vertInit=None,\n slitHolder_frac_dxInit=None,\n grating_linesInit=None,\n scattering_slopeInit=None,\n scattering_amplitudeInit=None,\n fiber_rInit=None,\n fluxInit=None):\n \"\"\"Initialize lmfit Parameters object.\n\n\n Allows to set up all parameters describing the pupil and\n Zernike parameter (up to z22) explicitly. If any value is not passed,\n it will be substituted by a default value (specified below).\n Parameters\n ----------\n zmax: `int`\n Total number of Zernike aberrations used (11 or 22)\n Possible to add more with extra_zernike parameter\n z4Init: `float`\n Initial Z4 aberration value in waves (that is 2*np.pi*wavelengths)\n # pupil parameters\n detFracInit: `float`\n Value determining how much of the exit pupil obscured by the\n central obscuration(detector)\n strutFracInit: `float`\n Value determining how much of the exit pupil is obscured\n by a single strut\n focalPlanePositionInit: (`float`, `float`)\n 2-tuple for position of the central obscuration(detector)\n in the focal plane\n slitFracInit: `float`\n Value determining how much of the exit pupil is obscured by slit\n slitFrac_dy_Init: `float`\n Value determining what is the vertical position of the slit\n in the exit pupil\n # parameters dsecribing individual struts\n wide_0Init: `float`\n Parameter describing widening of the strut at 0 degrees\n wide_23Init: `float`\n Parameter describing widening of the top-left strut\n wide_34Init: `float`\n Parameter describing widening of the bottom-left strut\n #non-uniform illumination\n radiometricEffectInit: `float`\n parameter describing non-uniform illumination of the pupil\n (1-params['radiometricEffect']**2*r**2)**\\\n (params['radiometricExponent']) [DEPRECATED]\n radiometricExponentInit: `float`\n parameter describing non-uniform illumination of the pupil\n (1-params['radiometricEffect']**2*r**2)\\\n **(params['radiometricExponent'])\n x_ilumInit: `float`\n x-position of the center of illumination\n of the exit pupil [DEPRECATED]\n y_ilumInit: `float`\n y-position of the center of illumination\n of the exit pupil [DEPRECATED]\n # illumination due to fiber, parameters\n x_fiberInit: `float`\n position of the fiber misaligment in the x direction\n y_fiberInit: `float`\n position of the fiber misaligment in the y direction\n effective_ilum_radiusInit: `float`\n fraction of the maximal radius of the illumination\n of the exit pupil that is actually illuminated\n frd_sigma: `float`\n sigma of Gaussian convolving only outer edge, mimicking FRD\n frd_lorentz_factor: `float`\n strength of the lorentzian factor describing wings\n of the pupil illumination\n misalign: `float`\n amount of misaligment in the illumination\n # further pupil parameters\n det_vert: `float\n multiplicative factor 
determining vertical size\n of the detector obscuration\n slitHolder_frac_dx: `float`\n dx position of slit holder\n # convolving (postprocessing) parameters\n grating_lines: `int`\n number of effective lines in the grating\n scattering_slopeInit: `float`\n slope of scattering\n scattering_amplitudeInit: `float`\n amplitude of scattering compared to optical PSF\n pixel_effectInit: `float`\n sigma describing charge diffusion effect [in units of 15 microns]\n fiber_rInit: `float`\n radius of perfect tophat fiber, as seen on the detector\n [in units of 15 microns]\n fluxInit: `float`\n total flux in generated image compared to input image\n (needs to be 1 or very close to 1)\n \"\"\"\n if self.verbosity == 1:\n logging.info(' ')\n logging.info('Initializing ZernikeFitterPFS')\n logging.info('Verbosity parameter is: ' + str(self.verbosity))\n logging.info('Highest Zernike polynomial is (zmax): ' + str(self.zmax))\n\n params = lmfit.Parameters()\n # Zernike parameters\n z_array = []\n\n if z4Init is None:\n params.add('z4', 0.0)\n else:\n params.add('z4', z4Init)\n\n for i in range(5, self.zmax + 1):\n params.add('z{}'.format(i), 0.0)\n\n # pupil parameters\n if detFracInit is None:\n params.add('detFrac', 0.65)\n else:\n params.add('detFrac', detFracInit)\n\n if strutFracInit is None:\n params.add('strutFrac', 0.07)\n else:\n params.add('strutFrac', strutFracInit)\n\n if focalPlanePositionInit is None:\n params.add('dxFocal', 0.0)\n params.add('dyFocal', 0.0)\n else:\n params.add('dxFocal', focalPlanePositionInit[0])\n params.add('dyFocal', focalPlanePositionInit[1])\n\n if slitFracInit is None:\n params.add('slitFrac', 0.05)\n else:\n params.add('slitFrac', slitFracInit)\n\n if slitFrac_dy_Init is None:\n params.add('slitFrac_dy', 0)\n else:\n params.add('slitFrac_dy', slitFrac_dy_Init)\n\n # parameters dsecribing individual struts\n if wide_0Init is None:\n params.add('wide_0', 0)\n else:\n params.add('wide_0', wide_0Init)\n\n if wide_23Init is None:\n params.add('wide_23', 0)\n else:\n params.add('wide_23', wide_23Init)\n\n if wide_43Init is None:\n params.add('wide_43', 0)\n else:\n params.add('wide_43', wide_43Init)\n\n # non-uniform illumination\n if radiometricExponentInit is None:\n params.add('radiometricExponent', 0.25)\n else:\n params.add('radiometricExponent', radiometricExponentInit)\n\n if radiometricEffectInit is None:\n params.add('radiometricEffect', 0)\n else:\n params.add('radiometricEffect', radiometricEffectInit)\n\n if x_ilumInit is None:\n params.add('x_ilum', 1)\n else:\n params.add('x_ilum', x_ilumInit)\n\n if y_ilumInit is None:\n params.add('y_ilum', 1)\n else:\n params.add('y_ilum', y_ilumInit)\n\n # illumination due to fiber, parameters\n if x_ilumInit is None:\n params.add('x_fiber', 1)\n else:\n params.add('x_fiber', x_fiberInit)\n\n if y_fiberInit is None:\n params.add('y_fiber', 0)\n else:\n params.add('y_fiber', y_fiberInit)\n\n if effective_ilum_radiusInit is None:\n params.add('effective_ilum_radius', 0.9)\n else:\n params.add('effective_ilum_radius', effective_ilum_radiusInit)\n\n if frd_sigmaInit is None:\n params.add('frd_sigma', 0.02)\n else:\n params.add('frd_sigma', frd_sigmaInit)\n\n if frd_lorentz_factorInit is None:\n params.add('frd_lorentz_factor', 0.5)\n else:\n params.add('frd_lorentz_factor', frd_lorentz_factorInit)\n\n if misalignInit is None:\n params.add('misalign', 0)\n else:\n params.add('misalign', misalignInit)\n\n # further pupil parameters\n if det_vertInit is None:\n params.add('det_vert', 1)\n else:\n params.add('det_vert', 
det_vertInit)\n\n if slitHolder_frac_dxInit is None:\n params.add('slitHolder_frac_dx', 0)\n else:\n params.add('slitHolder_frac_dx', slitHolder_frac_dxInit)\n\n # convolving (postprocessing) parameters\n if grating_linesInit is None:\n params.add('grating_lines', 100000)\n else:\n params.add('grating_lines', grating_linesInit)\n\n if scattering_slopeInit is None:\n params.add('scattering_slope', 2)\n else:\n params.add('scattering_slope', scattering_slopeInit)\n\n if scattering_amplitudeInit is None:\n params.add('scattering_amplitude', 10**-2)\n else:\n params.add('scattering_amplitude', scattering_amplitudeInit)\n\n if pixel_effectInit is None:\n params.add('pixel_effect', 0.35)\n else:\n params.add('pixel_effect', pixel_effectInit)\n\n if fiber_rInit is None:\n params.add('fiber_r', 1.8)\n else:\n params.add('fiber_r', fiber_rInit)\n\n if fluxInit is None:\n params.add('flux', 1)\n else:\n params.add('flux', fluxInit)\n\n self.params = params\n self.optPsf = None\n self.z_array = z_array\n\n def constructModelImage_PFS_naturalResolution(\n self,\n params=None,\n shape=None,\n pixelScale=None,\n use_optPSF=None,\n extraZernike=None,\n return_intermediate_images=False):\n \"\"\"Construct model image given the set of parameters\n Parameters\n ----------\n params : `lmfit.Parameters` object or python dictionary\n Parameters describing model; None to use self.params\n shape : `(int, int)`\n Shape for model image; None to use the shape of self.maskedImage\n pixelScale : `float`\n Pixel scale in arcseconds to use for model image;\n None to use self.pixelScale.\n use_optPSF : `bool`\n If True, use previously generated optical PSF,\n skip _getOptPsf_naturalResolution, and conduct only postprocessing\n extraZernike : `np.array`, (N,)\n Zernike parameteres beyond z22\n return_intermediate_images : `bool`\n If True, return intermediate images created during the run\n This is in order to help with debugging and inspect\n the images created during the process\n Return\n ----------\n (if not return_intermediate_images)\n optPsf_final : `np.array`, (N, N)\n Final model image\n psf_position : np.array, (2,)\n Position where image is centered\n (if return_intermediate_images)\n optPsf_final : `np.array`, (N, N)\n Final model image\n ilum : `np.array`, (N, N)\n Illumination array\n wf_grid_rot : `np.array`, (N, N)\n Wavefront array\n psf_position : np.array, (2,)\n Position where image is centered\n Notes\n ----------\n Calls _getOptPsf_naturalResolution and optPsf_postprocessing\n \"\"\"\n if self.verbosity == 1:\n logging.info(' ')\n logging.info('Entering constructModelImage_PFS_naturalResolution')\n\n if params is None:\n params = self.params\n if shape is None:\n shape = self.image.shape\n if pixelScale is None:\n pixelScale = self.pixelScale\n logging.info('pixelScale_1573'+str(pixelScale))\n try:\n parameter_values = params.valuesdict()\n except AttributeError:\n parameter_values = params\n use_optPSF = self.use_optPSF\n\n if extraZernike is None:\n pass\n else:\n extraZernike = list(extraZernike)\n self.extraZernike = extraZernike\n\n # if you did not pass pure optical psf image, create one here\n if use_optPSF is None:\n # change outputs depending on if you want intermediate results\n if not return_intermediate_images:\n optPsf = self._getOptPsf_naturalResolution(\n parameter_values, return_intermediate_images=return_intermediate_images)\n else:\n optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(\n parameter_values, return_intermediate_images=return_intermediate_images)\n else:\n # 
if you claimed to have supplied optical psf image,\n # but none is provided still create one\n if self.optPsf is None:\n if not return_intermediate_images:\n optPsf = self._getOptPsf_naturalResolution(\n parameter_values, return_intermediate_images=return_intermediate_images)\n else:\n optPsf, ilum, wf_grid_rot = self._getOptPsf_naturalResolution(\n parameter_values, return_intermediate_images=return_intermediate_images)\n self.optPsf = optPsf\n else:\n optPsf = self.optPsf\n\n # at the moment, no difference in optPsf_postprocessing depending on return_intermediate_images\n optPsf_final, psf_position = self._optPsf_postprocessing(\n optPsf, return_intermediate_images=return_intermediate_images)\n\n if self.save == 1:\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf', optPsf)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_final', optPsf_final)\n else:\n pass\n\n if not return_intermediate_images:\n return optPsf_final, psf_position\n if return_intermediate_images:\n return optPsf_final, ilum, wf_grid_rot, psf_position\n\n if self.verbosity == 1:\n logging.info('Finished with constructModelImage_PFS_naturalResolution')\n logging.info(' ')\n\n def _optPsf_postprocessing(self, optPsf, return_intermediate_images=False):\n \"\"\"Apply postprocessing to the pure optical psf image\n Parameters\n ----------\n optPsf : `np.array`, (N, N)\n Optical image, only psf\n return_intermediate_images : `bool`\n If True, return intermediate images created during the run\n This is potentially in order to help with debugging and inspect\n the images created during the process\n Returns\n ----------\n (At the moment, the output is the same no matter what\n return_intermediate_images is, but there is a possibility\n to add intermediate outputs)\n optPsf_final : `np.array`, (N, N)\n Final model image\n psf_position : `np.array`, (2,)\n Position where the image is centered\n Notes\n ----------\n Takes optical psf and ``postprocesses`` it to generate final image.\n The algorithm first reduces the oversampling and cuts the central part\n of the image. This is done to speed up the calculations.\n Then we apply various effects that are separate from\n the pure optical PSF considerations.\n We then finish with the centering algorithm to move our created image\n to fit the input science image, invoking PSFPosition class.\n The effects we apply are\n 1. scattered light\n function apply_scattered_light\n 2. convolution with fiber\n function convolve_with_fiber\n 3. CCD difusion\n function convolve_with_CCD_diffusion\n 4. grating effects\n function convolve_with_grating\n 5. 
centering\n via class PsfPosition\n \"\"\"\n time_start_single = time.time()\n if self.verbosity == 1:\n logging.info(' ')\n logging.info('Entering optPsf_postprocessing')\n\n params = self.params\n shape = self.image.shape\n\n # all of the parameters for the creation of the image\n # very stupidly called ``v'' without any reason whatsoever\n param_values = params.valuesdict()\n\n # how much is my generated image oversampled compared to final image\n oversampling_original = (self.pixelScale_effective) / self.scale_ModelImage_PFS_naturalResolution\n\n if self.verbosity == 1:\n logging.info('Shape of optPsf: ' + str(optPsf.shape))\n logging.info('Value of oversampling_original: ' + str(oversampling_original))\n\n # determine the size, so that from the huge generated image we can cut out\n # only the central portion (1.4 times larger than the size of actual\n # image)\n size_of_central_cut = int(oversampling_original * self.image.shape[0] * 1.4)\n\n if size_of_central_cut > optPsf.shape[0]:\n # if larger than size of image, cut the image\n # fail if not enough space\n size_of_central_cut = optPsf.shape[0]\n if self.verbosity == 1:\n logging.info('size_of_central_cut modified to ' + str(size_of_central_cut))\n assert int(oversampling_original * self.image.shape[0] * 1.0) < optPsf.shape[0]\n\n assert size_of_central_cut <= optPsf.shape[0]\n if self.verbosity == 1:\n logging.info('size_of_central_cut: ' + str(size_of_central_cut))\n\n # cut part which you need to form the final image\n # set oversampling to 1 so you are not resizing the image, and dx=0 and\n # dy=0 so that you are not moving around, i.e., you are cutting the\n # central region\n optPsf_cut = PsfPosition.cut_Centroid_of_natural_resolution_image(\n image=optPsf, size_natural_resolution=size_of_central_cut + 1, oversampling=1, dx=0, dy=0)\n if self.verbosity == 1:\n logging.info('optPsf_cut.shape' + str(optPsf_cut.shape))\n\n # we want to reduce oversampling to be roughly around 10 to make things computationaly easier\n # if oversamplign_original is smaller than 20 (in case of dithered images),\n # make res coarser by factor of 2\n # otherwise set it to 11\n if oversampling_original < 20:\n oversampling = np.round(oversampling_original / 2)\n else:\n oversampling = 11\n if self.verbosity == 1:\n logging.info('oversampling:' + str(oversampling))\n\n # what will be the size of the image after you resize it to the from\n # ``oversampling_original'' to ``oversampling'' ratio\n size_of_optPsf_cut_downsampled = np.int(\n np.round(size_of_central_cut / (oversampling_original / oversampling)))\n if self.verbosity == 1:\n logging.info('size_of_optPsf_cut_downsampled: ' + str(size_of_optPsf_cut_downsampled))\n\n # make sure that optPsf_cut_downsampled is an array which has an odd size\n # - increase size by 1 if needed\n if (size_of_optPsf_cut_downsampled % 2) == 0:\n im1 = galsim.Image(optPsf_cut, copy=True, scale=1)\n im1.setCenter(0, 0)\n interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))\n optPsf_cut_downsampled = interpolated_image.\\\n drawImage(nx=size_of_optPsf_cut_downsampled + 1, ny=size_of_optPsf_cut_downsampled + 1,\n scale=(oversampling_original / oversampling), method='no_pixel').array\n else:\n im1 = galsim.Image(optPsf_cut, copy=True, scale=1)\n im1.setCenter(0, 0)\n interpolated_image = galsim._InterpolatedImage(im1, x_interpolant=galsim.Lanczos(5, True))\n optPsf_cut_downsampled = interpolated_image.\\\n drawImage(nx=size_of_optPsf_cut_downsampled, ny=size_of_optPsf_cut_downsampled,\n 
scale=(oversampling_original / oversampling), method='no_pixel').array\n\n if self.verbosity == 1:\n logging.info('optPsf_cut_downsampled.shape: ' + str(optPsf_cut_downsampled.shape))\n\n # gives middle point of the image to used for calculations of scattered light\n # mid_point_of_optPsf_cut_downsampled = int(optPsf_cut_downsampled.shape[0] / 2)\n\n # gives the size of one pixel in optPsf_downsampled in microns\n # one physical pixel is 15 microns\n # effective size is 15 / dithering\n # size_of_pixels_in_optPsf_cut_downsampled = (15 / self.dithering) / oversampling\n\n # size of the created optical PSF images in microns\n # size_of_optPsf_cut_in_Microns = size_of_pixels_in_optPsf_cut_downsampled * \\\n # (optPsf_cut_downsampled.shape[0])\n # if self.verbosity == 1:\n # logging.info('size_of_optPsf_cut_in_Microns: ' + str(size_of_optPsf_cut_in_Microns))\n\n if self.verbosity == 1:\n logging.info('Postprocessing parameters are:')\n logging.info(str(['grating_lines', 'scattering_slope', 'scattering_amplitude',\n 'pixel_effect', 'fiber_r']))\n logging.info(str([param_values['grating_lines'], param_values['scattering_slope'],\n param_values['scattering_amplitude'], param_values['pixel_effect'],\n param_values['fiber_r']]))\n\n ##########################################\n # 1. scattered light\n optPsf_cut_downsampled_scattered = self.apply_scattered_light(optPsf_cut_downsampled,\n oversampling,\n param_values['scattering_slope'],\n param_values['scattering_amplitude'],\n dithering=self.dithering)\n\n ##########################################\n # 2. convolution with fiber\n optPsf_cut_fiber_convolved = self.convolve_with_fiber(optPsf_cut_downsampled_scattered,\n oversampling,\n param_values['fiber_r'],\n dithering=self.dithering)\n\n ##########################################\n # 3. CCD difusion\n optPsf_cut_pixel_response_convolved = self.convolve_with_CCD_diffusion(optPsf_cut_fiber_convolved,\n oversampling,\n param_values['pixel_effect'],\n dithering=self.dithering)\n\n ##########################################\n # 4. grating effects\n optPsf_cut_grating_convolved = self.convolve_with_grating(optPsf_cut_pixel_response_convolved,\n oversampling,\n self.wavelength,\n param_values['grating_lines'],\n dithering=self.dithering)\n\n ##########################################\n # 5. 
centering\n # This is the part which creates the final image\n\n # the algorithm finds the best downsampling combination automatically\n if self.verbosity == 1:\n logging.info('Are we invoking double sources (1 or True if yes): ' + str(self.double_sources))\n logging.info('Double source position/ratio is:' + str(self.double_sources_positions_ratios))\n\n # initialize the class which does the centering -\n # TODO: the separation between the class and the main function in the class,\n # ``find_single_realization_min_cut'', is a bit blurry and unsatisfactory\n # this needs to be improved\n single_Psf_position = PsfPosition(optPsf_cut_grating_convolved,\n int(round(oversampling)),\n shape[0],\n simulation_00=self.simulation_00,\n verbosity=self.verbosity,\n save=self.save,\n PSF_DIRECTORY=self.PSF_DIRECTORY)\n time_end_single = time.time()\n if self.verbosity == 1:\n logging.info('Time for postprocessing up to single_Psf_position protocol is: '\n + str(time_end_single - time_start_single))\n\n # run the code for centering\n time_start_single = time.time()\n optPsf_final, psf_position =\\\n single_Psf_position.find_single_realization_min_cut(optPsf_cut_grating_convolved,\n int(round(oversampling)),\n shape[0],\n self.image,\n self.image_var,\n self.image_mask,\n v_flux=param_values['flux'],\n double_sources=self.double_sources,\n double_sources_positions_ratios= # noqa: E251\n self.double_sources_positions_ratios,\n verbosity=self.verbosity,\n explicit_psf_position= # noqa: E251\n self.explicit_psf_position,\n use_only_chi=self.use_only_chi,\n use_center_of_flux=self.use_center_of_flux)\n time_end_single = time.time()\n\n if self.verbosity == 1:\n logging.info('Time for single_Psf_position protocol is '\n + str(time_end_single - time_start_single))\n\n if self.verbosity == 1:\n logging.info('Sucesfully created optPsf_final')\n print(self.save)\n if self.save == 1:\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut', optPsf_cut)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled', optPsf_cut_downsampled)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_downsampled_scattered',\n optPsf_cut_downsampled_scattered)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_fiber_convolved',\n optPsf_cut_fiber_convolved)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_pixel_response_convolved',\n optPsf_cut_pixel_response_convolved)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'optPsf_cut_grating_convolved',\n optPsf_cut_grating_convolved)\n\n if self.verbosity == 1:\n logging.info('Finished with optPsf_postprocessing')\n logging.info(' ')\n\n # TODO: at the moment, the output is the same but there is a possibility to add intermediate outputs\n if not return_intermediate_images:\n return optPsf_final, psf_position\n\n if return_intermediate_images:\n return optPsf_final, psf_position\n\n def apply_scattered_light(self, image, oversampling,\n scattering_slope, scattering_amplitude, dithering):\n \"\"\"Add scattered light to optical psf\n Parameters\n ----------\n image : `np.array`, (N, N)\n input image\n oversampling: `int`\n how oversampled is `image`\n scattering_slope: `float`\n slope of the scattered light\n scattering_amplitude: `float`\n amplitude of the scattered light\n dithering: `int`\n dithering\n Returns\n ----------\n image_scattered : `np.array`, (N, N)\n image convolved with the fiber image\n Notes\n ----------\n Assumes that one physical pixel is 15 microns\n so that effective size of the pixels is 15 / dithering\n \"\"\"\n 
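        # The scattered-light component below is built from a radial power-law
        # kernel, r**(-scattering_slope), evaluated on a grid expressed in microns
        # (one physical pixel is 15 microns, reduced by dithering and oversampling).
        # The kernel is floored inside r < 7.5 microns, infinities are zeroed, and
        # it is scaled by scattering_amplitude / (10 * max(kernel)) before being
        # FFT-convolved with the PSF and added back to the input image.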
size_of_pixels_in_image = (15 / self.dithering) / oversampling\n\n # size of the created optical PSF images in microns\n size_of_image_in_Microns = size_of_pixels_in_image * \\\n (image.shape[0])\n\n # create grid to apply scattered light\n pointsx = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,\n (size_of_image_in_Microns - size_of_pixels_in_image) / 2,\n num=image.shape[0],\n dtype=np.float32)\n pointsy = np.linspace(-(size_of_image_in_Microns - size_of_pixels_in_image) / 2,\n (size_of_image_in_Microns - size_of_pixels_in_image) / 2,\n num=image.shape[0]).astype(np.float32)\n xs, ys = np.meshgrid(pointsx, pointsy)\n r0 = np.sqrt((xs - 0) ** 2 + (ys - 0) ** 2) + .01\n\n # creating scattered light\n scattered_light_kernel = (r0**(-scattering_slope))\n scattered_light_kernel[r0 < 7.5] = 7.5**(-scattering_slope)\n scattered_light_kernel[scattered_light_kernel == np.inf] = 0\n scattered_light_kernel = scattered_light_kernel * \\\n (scattering_amplitude) / (10 * np.max(scattered_light_kernel))\n\n # convolve the psf with the scattered light kernel to create scattered light component\n scattered_light = signal.fftconvolve(image, scattered_light_kernel, mode='same')\n\n # add back the scattering to the image\n image_scattered = image + scattered_light\n\n return image_scattered\n\n def convolve_with_fiber(self, image, oversampling, fiber_r, dithering):\n \"\"\"Convolve optical psf with a fiber\n Parameters\n ----------\n image : `np.array`, (N, N)\n input image\n oversampling: `int`\n how oversampled is `image`\n fiber_r: `float`\n radius of the fiber in pixel units\n dithering: `int`\n dithering\n Returns\n ----------\n image_fiber_convolved : `np.array`, (N, N)\n image convolved with the fiber image\n Notes\n ----------\n \"\"\"\n fiber = Tophat2DKernel(oversampling * fiber_r * dithering,\n mode='oversample').array\n # create array with zeros with size of the current image, which we will\n # fill with fiber array in the middle\n fiber_padded = np.zeros_like(image, dtype=np.float32)\n mid_point_of_image = int(image.shape[0] / 2)\n fiber_array_size = fiber.shape[0]\n # fill the zeroes image with fiber here\n fiber_padded[int(mid_point_of_image - fiber_array_size / 2) + 1:\n int(mid_point_of_image + fiber_array_size / 2) + 1,\n int(mid_point_of_image - fiber_array_size / 2) + 1:\n int(mid_point_of_image + fiber_array_size / 2) + 1] = fiber\n\n # convolve with the fiber\n image_fiber_convolved = signal.fftconvolve(image, fiber_padded, mode='same')\n return image_fiber_convolved\n\n def convolve_with_CCD_diffusion(self, image, oversampling, pixel_effect, dithering):\n \"\"\"Convolve optical psf with a ccd diffusion effect\n Parameters\n ----------\n image : `np.array`, (N, N)\n input image\n oversampling: `int`\n how oversampled is `image`\n pixel_effect: `float`\n sigma of gaussian kernel convolving image\n dithering: `int`\n dithering\n Returns\n ----------\n image_pixel_response_convolved : `np.array`, (N, N)\n image convolved with the ccd diffusion kernel\n Notes\n ----------\n Pixels are not perfect detectors\n Charge diffusion in our optical CCDs, can be well described with a Gaussian\n sigma that is around 7 microns (Jim Gunn - private communication).\n This is controled in our code by @param 'pixel_effect'\n \"\"\"\n pixel_gauss = Gaussian2DKernel(oversampling * pixel_effect * dithering).array.astype(np.float32)\n pixel_gauss_padded = np.pad(pixel_gauss, int((len(image) - len(pixel_gauss)) / 2),\n 'constant', constant_values=0)\n\n # assert that gauss_padded array 
did not produce empty array\n assert np.sum(pixel_gauss_padded) > 0\n\n image_pixel_response_convolved = signal.fftconvolve(image, pixel_gauss_padded, mode='same')\n return image_pixel_response_convolved\n\n def convolve_with_grating(self, image, oversampling, wavelength, grating_lines, dithering):\n \"\"\"Convolve optical psf with a grating effect\n Parameters\n ----------\n image : `np.array`, (N, N)\n input image\n oversampling: `int`\n how oversampled is `image`\n wavelength: `float`\n central wavelength of the spot\n grating_lines: `int`\n effective number of grating lines in the spectrograph\n dithering: `int`\n dithering\n\n Returns\n ----------\n image_grating_convolved : `np.array`, (N, N)\n image convolved with the grating effect\n\n Notes\n ----------\n This code assumes that 15 microns covers wavelength range of 0.07907 nm\n (assuming that 4300 pixels in real detector uniformly covers 340 nm)\n \"\"\"\n grating_kernel = np.ones((image.shape[0], 1), dtype=np.float32)\n for i in range(len(grating_kernel)):\n grating_kernel[i] = Ifun16Ne((i - int(image.shape[0] / 2)) * 0.07907 * 10**-9\n / (dithering * oversampling) + wavelength * 10**-9,\n wavelength * 10**-9, grating_lines)\n grating_kernel = grating_kernel / np.sum(grating_kernel)\n\n image_grating_convolved = signal.fftconvolve(image, grating_kernel, mode='same')\n return image_grating_convolved\n\n def _get_Pupil(self):\n \"\"\"Create an image of the pupil\n\n Parameters\n ----------\n params : `lmfit.Parameters` object or python dictionary\n Parameters describing the pupil model\n\n Returns\n ----------\n pupil : `pupil`\n Instance of class PFSPupilFactory\n\n Notes\n ----------\n Calls PFSPupilFactory class\n \"\"\"\n if self.verbosity == 1:\n logging.info(' ')\n logging.info('Entering _get_Pupil (function inside ZernikeFitterPFS)')\n\n if self.verbosity == 1:\n logging.info('Size of the pupil (npix): ' + str(self.npix))\n\n Pupil_Image = PFSPupilFactory(\n pupilSize=self.diam_sic,\n npix=self.npix,\n input_angle=np.pi / 2,\n detFrac=self.params['detFrac'].value,\n strutFrac=self.params['strutFrac'].value,\n slitFrac=self.params['slitFrac'].value,\n slitFrac_dy=self.params['slitFrac_dy'].value,\n x_fiber=self.params['x_fiber'].value,\n y_fiber=self.params['y_fiber'].value,\n effective_ilum_radius=self.params['effective_ilum_radius'].value,\n frd_sigma=self.params['frd_sigma'].value, # noqa: E\n frd_lorentz_factor=self.params['frd_lorentz_factor'].value,\n det_vert=self.params['det_vert'].value,\n slitHolder_frac_dx=self.params['slitHolder_frac_dx'].value,\n wide_0=self.params['wide_0'].value,\n wide_23=self.params['wide_23'].value,\n wide_43=self.params['wide_43'].value,\n misalign=self.params['misalign'].value,\n verbosity=self.verbosity)\n\n point = [self.params['dxFocal'].value, self.params['dyFocal'].value] # noqa: E\n pupil = Pupil_Image.getPupil(point)\n\n if self.save == 1:\n np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'pupil.illuminated',\n pupil.illuminated.astype(np.float32))\n\n if self.verbosity == 1:\n logging.info('Finished with _get_Pupil')\n\n return pupil\n\n def _getOptPsf_naturalResolution(self, params, return_intermediate_images=False):\n \"\"\"Returns optical PSF, given the initialized parameters\n\n Parameters\n ----------\n params : `lmfit.Parameters` object or python dictionary\n Parameters descrubing model\n return_intermediate_images : `bool`\n If True, return intermediate images created during the run\n This is in order to help with debugging and inspect\n the images created during the process\n\n 
Returns\n ----------\n (if not return_intermediate_images)\n img_apod : `np.array`\n Psf image, only optical components considred\n (if return_intermediate_images)\n # return the image, pupil, illumination applied to the pupil\n img_apod : `np.array`\n Psf image, only optical components considred\n ilum : `np.array`\n Image showing the illumination of the pupil\n wf_grid_rot : `np.array`\n Image showing the wavefront across the pupil\n\n Notes\n ----------\n called by constructModelImage_PFS_naturalResolution\n \"\"\"\n\n if self.verbosity == 1:\n logging.info(' ')\n logging.info('Entering _getOptPsf_naturalResolution')\n\n ################################################################################\n # pupil and illumination of the pupil\n ################################################################################\n time_start_single_1 = time.time()\n if self.verbosity == 1:\n logging.info('use_pupil_parameters: ' + str(self.use_pupil_parameters))\n logging.info('pupil_parameters if you are explicity passing use_pupil_parameters: '\n + str(self.pupil_parameters))\n\n # parmeters ``i'' just to precision in the construction of ``pupil_parameters'' array\n # not sure why linter is complaining here with\n # ('...'.format(...) has unused arguments at position(s): 0)\n i = 4\n if self.use_pupil_parameters is None:\n pupil_parameters = np.array([params['detFrac'.format(i)], # noqa: E\n params['strutFrac'.format(i)], # noqa: E\n params['dxFocal'.format(i)], # noqa: E\n params['dyFocal'.format(i)], # noqa: E\n params['slitFrac'.format(i)], # noqa: E\n params['slitFrac_dy'.format(i)], # noqa: E\n params['x_fiber'.format(i)], # noqa: E\n params['y_fiber'.format(i)], # noqa: E\n params['effective_ilum_radius'.format(i)], # noqa: E\n params['frd_sigma'.format(i)], # noqa: E\n params['frd_lorentz_factor'.format(i)], # noqa: E\n params['det_vert'.format(i)], # noqa: E\n params['slitHolder_frac_dx'.format(i)], # noqa: E\n params['wide_0'.format(i)], # noqa: E\n params['wide_23'.format(i)], # noqa: E\n params['wide_43'.format(i)], # noqa: E\n params['misalign'.format(i)]]) # noqa: E\n self.pupil_parameters = pupil_parameters\n else:\n pupil_parameters = np.array(self.pupil_parameters)\n\n diam_sic = self.diam_sic\n\n if self.verbosity == 1:\n logging.info(['detFrac', 'strutFrac', 'dxFocal', 'dyFocal', 'slitFrac', 'slitFrac_dy'])\n logging.info(['x_fiber', 'y_fiber', 'effective_ilum_radius', 'frd_sigma',\n 'frd_lorentz_factor', 'det_vert', 'slitHolder_frac_dx'])\n logging.info(['wide_0', 'wide_23', 'wide_43', 'misalign'])\n logging.info('set of pupil_parameters I. : ' + str([params['detFrac'], params['strutFrac'],\n params['dxFocal'], params['dyFocal'],\n params['slitFrac'], params['slitFrac_dy']]))\n logging.info('set of pupil_parameters II. : ' + str([params['x_fiber'], params['y_fiber'],\n params['effective_ilum_radius'],\n params['slitHolder_frac_dx'],\n params['frd_lorentz_factor'],\n params['det_vert'],\n params['slitHolder_frac_dx']]))\n logging.info('set of pupil_parameters III. 
: ' + str([params['wide_0'], params['wide_23'],\n params['wide_43'], params['misalign']]))\n time_start_single_2 = time.time()\n\n # initialize galsim.Aperature class\n pupil = self._get_Pupil()\n aper = galsim.Aperture(\n diam=pupil.size,\n pupil_plane_im=pupil.illuminated.astype(np.float32),\n pupil_plane_scale=pupil.scale,\n pupil_plane_size=None)\n\n if self.verbosity == 1:\n if self.pupilExplicit is None:\n logging.info('Requested pupil size is (pupil.size) [m]: ' + str(pupil.size))\n logging.info('One pixel has size of (pupil.scale) [m]: ' + str(pupil.scale))\n logging.info('Requested pupil has so many pixels (pupil_plane_im): '\n + str(pupil.illuminated.astype(np.int16).shape))\n else:\n logging.info('Supplied pupil size is (diam_sic) [m]: ' + str(self.diam_sic))\n logging.info('One pixel has size of (diam_sic/npix) [m]: ' + str(self.diam_sic / self.npix))\n logging.info('Requested pupil has so many pixels (pupilExplicit): '\n + str(self.pupilExplicit.shape))\n\n time_end_single_2 = time.time()\n if self.verbosity == 1:\n logging.info('Time for _get_Pupil function is ' + str(time_end_single_2 - time_start_single_2))\n\n time_start_single_3 = time.time()\n # create array with pixels=1 if the area is illuminated and 0 if it is obscured\n ilum = np.array(aper.illuminated, dtype=np.float32)\n assert np.sum(ilum) > 0, str(self.pupil_parameters)\n\n # gives size of the illuminated image\n lower_limit_of_ilum = int(ilum.shape[0] / 2 - self.npix / 2)\n higher_limit_of_ilum = int(ilum.shape[0] / 2 + self.npix / 2)\n if self.verbosity == 1:\n logging.info('lower_limit_of_ilum: ' + str(lower_limit_of_ilum))\n logging.info('higher_limit_of_ilum: ' + str(higher_limit_of_ilum))\n\n if self.pupilExplicit is None:\n ilum[lower_limit_of_ilum:higher_limit_of_ilum,\n lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,\n lower_limit_of_ilum:higher_limit_of_ilum] *\\\n pupil.illuminated\n else:\n ilum[lower_limit_of_ilum:higher_limit_of_ilum,\n lower_limit_of_ilum:higher_limit_of_ilum] = ilum[lower_limit_of_ilum:higher_limit_of_ilum,\n lower_limit_of_ilum:higher_limit_of_ilum] *\\\n self.pupilExplicit.astype(np.float32)\n\n if self.verbosity == 1:\n logging.info('Size after padding zeros to 2x size'\n + 'and extra padding to get size suitable for FFT: '\n + str(ilum.shape))\n\n # maximum extent of pupil image in units of radius of the pupil, needed for next step\n size_of_ilum_in_units_of_radius = ilum.shape[0] / self.npix\n\n if self.verbosity == 1:\n logging.info('size_of_ilum_in_units_of_radius: ' + str(size_of_ilum_in_units_of_radius))\n\n # do not caculate the ``radiometric effect (difference between entrance and exit pupil)\n # if paramters are too small to make any difference\n # if that is the case just declare the ``ilum_radiometric'' to be the same as ilum\n # i.e., the illumination of the exit pupil is the same as the illumination of the entrance pupil\n if params['radiometricExponent'] < 0.01 or params['radiometricEffect'] < 0.01:\n if self.verbosity == 1:\n logging.info('skiping ``radiometric effect\\'\\' ')\n ilum_radiometric = ilum\n\n else:\n if self.verbosity == 1:\n logging.info('radiometric parameters are: ')\n logging.info('x_ilum,y_ilum,radiometricEffect,radiometricExponent'\n + str([params['x_ilum'], params['y_ilum'],\n params['radiometricEffect'], params['radiometricExponent']]))\n\n # add the change of flux between the entrance and exit pupil\n # end product is radiometricEffectArray\n points = np.linspace(-size_of_ilum_in_units_of_radius,\n 
size_of_ilum_in_units_of_radius, num=ilum.shape[0])\n xs, ys = np.meshgrid(points, points)\n _radius_coordinate = np.sqrt(\n (xs - params['x_ilum'] * params['dxFocal']) ** 2\n + (ys - params['y_ilum'] * params['dyFocal']) ** 2)\n\n # change in v_0.14\n # ilumination to which radiometric effet has been applied, describing\n # difference betwen entrance and exit pupil\n radiometricEffectArray = (1 + params['radiometricEffect']\n * _radius_coordinate**2)**(-params['radiometricExponent'])\n ilum_radiometric = np.nan_to_num(radiometricEffectArray * ilum, 0)\n\n # this is where you can introduce some apodization in the pupil image by using the line below\n # the apodization sigma is set to that in focus it is at 0.75\n # for larger images, scale according to the size of the input image which is to be FFT-ed\n # 0.75 is an arbitrary number\n apodization_sigma = ((len(ilum_radiometric)) / 1158)**0.875 * 0.75\n # apodization_sigma=0.75\n time_start_single_4 = time.time()\n\n # old code where I applied Gaussian to the whole ilum image\n # ilum_radiometric_apodized = gaussian_filter(ilum_radiometric, sigma=apodization_sigma)\n\n # cut out central region, apply Gaussian on the center region and return to the full size image\n # done to spped up the calculation\n # noqa: E128 in order to keep informative names\n ilum_radiometric_center_region =\\\n ilum_radiometric[(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):\n (higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma))),\n (lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):\n (higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))]\n\n ilum_radiometric_center_region_apodized = gaussian_filter(\n ilum_radiometric_center_region, sigma=apodization_sigma)\n\n ilum_radiometric_apodized = np.copy(ilum_radiometric)\n ilum_radiometric_apodized[(lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):\n (higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma))),\n (lower_limit_of_ilum - int(np.ceil(3 * apodization_sigma))):\n (higher_limit_of_ilum + int(np.ceil(3 * apodization_sigma)))] =\\\n ilum_radiometric_center_region_apodized # noqa E:122\n\n time_end_single_4 = time.time()\n if self.verbosity == 1:\n logging.info('Time to apodize the pupil: ' + str(time_end_single_4 - time_start_single_4))\n logging.info('type(ilum_radiometric_apodized)' + str(type(ilum_radiometric_apodized[0][0])))\n # put pixels for which amplitude is less than 0.01 to 0\n r_ilum_pre = np.copy(ilum_radiometric_apodized)\n r_ilum_pre[ilum_radiometric_apodized > 0.01] = 1\n r_ilum_pre[ilum_radiometric_apodized < 0.01] = 0\n ilum_radiometric_apodized_bool = r_ilum_pre.astype(bool)\n\n # manual creation of aper.u and aper.v (mimicking steps which were automatically done in galsim)\n # this gives position information about each point in the exit pupil so we can apply wavefront to it\n\n # aperu_manual=[]\n # for i in range(len(ilum_radiometric_apodized_bool)):\n # aperu_manual.append(np.linspace(-diam_sic*(size_of_ilum_in_units_of_radius/2),\n # diam_sic*(size_of_ilum_in_units_of_radius/2),len(ilum_radiometric_apodized_bool), endpoint=True))\n single_line_aperu_manual = np.linspace(-diam_sic * (size_of_ilum_in_units_of_radius / 2), diam_sic * (\n size_of_ilum_in_units_of_radius / 2), len(ilum_radiometric_apodized_bool), endpoint=True)\n aperu_manual = np.tile(\n single_line_aperu_manual,\n len(single_line_aperu_manual)).reshape(\n len(single_line_aperu_manual),\n len(single_line_aperu_manual))\n\n # full grid\n # u_manual=np.array(aperu_manual)\n u_manual 
= aperu_manual\n v_manual = np.transpose(aperu_manual)\n\n # select only parts of the grid that are actually illuminated\n u = u_manual[ilum_radiometric_apodized_bool]\n v = v_manual[ilum_radiometric_apodized_bool]\n\n time_end_single_3 = time.time()\n if self.verbosity == 1:\n logging.info('Time for postprocessing pupil after _get_Pupil '\n + str(time_end_single_3 - time_start_single_3))\n\n time_end_single_1 = time.time()\n if self.verbosity == 1:\n logging.info('Time for pupil and illumination calculation is '\n + str(time_end_single_1 - time_start_single_1))\n\n ################################################################################\n # wavefront\n ################################################################################\n # create wavefront across the exit pupil\n\n time_start_single = time.time()\n if self.verbosity == 1:\n logging.info('')\n logging.info('Starting creation of wavefront')\n\n aberrations_init = [0.0, 0, 0.0, 0.0]\n aberrations = aberrations_init\n # list of aberrations where we set z4, z11, z22 etc...\n # This is only for testing purposes to study behaviour of non-focus terms\n aberrations_0 = list(np.copy(aberrations_init))\n for i in range(4, self.zmax + 1):\n aberrations.append(params['z{}'.format(i)])\n if i in [4, 11, 22]:\n aberrations_0.append(0)\n else:\n aberrations_0.append(params['z{}'.format(i)])\n\n # if you have passed abberation above Zernike 22, join them with lower\n # order abberations here\n if self.extraZernike is None:\n pass\n else:\n aberrations_extended = np.concatenate((aberrations, self.extraZernike), axis=0)\n\n if self.verbosity == 1:\n logging.info('diam_sic [m]: ' + str(diam_sic))\n logging.info('aberrations: ' + str(aberrations))\n logging.info('aberrations moved to z4=0: ' + str(aberrations_0))\n logging.info('aberrations extra: ' + str(self.extraZernike))\n logging.info('wavelength [nm]: ' + str(self.wavelength))\n\n if self.extraZernike is None:\n optics_screen = galsim.phase_screens.OpticalScreen(\n diam=diam_sic, aberrations=aberrations, lam_0=self.wavelength)\n if self.save == 1:\n # only create fake with abberations 0 if we are going to save i.e., if we\n # presenting the results\n optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(\n diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)\n else:\n optics_screen = galsim.phase_screens.OpticalScreen(\n diam=diam_sic, aberrations=aberrations_extended, lam_0=self.wavelength)\n if self.save == 1:\n # only create fake with abberations 0 if we are going to save i.e., if we\n # presenting the results\n optics_screen_fake_0 = galsim.phase_screens.OpticalScreen(\n diam=diam_sic, aberrations=aberrations_0, lam_0=self.wavelength)\n\n screens = galsim.PhaseScreenList(optics_screen)\n if self.save == 1:\n # only create fake with abberations 0 if we are going to save i.e., if we presenting the results\n screens_fake_0 = galsim.PhaseScreenList(optics_screen_fake_0)\n\n time_end_single = time.time()\n\n ################################################################################\n # combining pupil illumination and wavefront\n ################################################################################\n\n # apply wavefront to the array describing illumination\n # logging.info(self.use_wf_grid)\n\n if self.use_wf_grid is None:\n wf = screens.wavefront(u, v, None, 0)\n if self.save == 1:\n wf_full = screens.wavefront(u_manual, v_manual, None, 0)\n wf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.float32)\n 
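# Note: the wavefront returned by the phase screens is in the same length units as\n            # self.wavelength (presumably nm here), so dividing by the wavelength below expresses\n            # it in waves; the pupil function further down then applies a phase of\n            # 2 * pi * (wavefront in waves) via np.exp(2j * np.pi * wf_grid_rot).\n            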
wf_grid[ilum_radiometric_apodized_bool] = (wf / self.wavelength)\n wf_grid_rot = wf_grid\n else:\n # if you want to pass an explit wavefront, it goes here\n wf_grid = self.use_wf_grid\n wf_grid_rot = wf_grid\n\n if self.save == 1:\n # only create fake images with abberations set to 0 if we are going to save\n # i.e., if we are testing the results\n if self.verbosity == 1:\n logging.info('creating wf_full_fake_0')\n wf_full_fake_0 = screens_fake_0.wavefront(u_manual, v_manual, None, 0)\n\n # exponential of the wavefront\n expwf_grid = np.zeros_like(ilum_radiometric_apodized_bool, dtype=np.complex64)\n expwf_grid[ilum_radiometric_apodized_bool] =\\\n ilum_radiometric_apodized[ilum_radiometric_apodized_bool] *\\\n np.exp(2j * np.pi * wf_grid_rot[ilum_radiometric_apodized_bool])\n\n if self.verbosity == 1:\n logging.info('Time for wavefront and wavefront/pupil combining is '\n + str(time_end_single - time_start_single))\n\n ################################################################################\n # exectute the FFT\n ################################################################################\n # updated up to here\n ######################################################################\n\n time_start_single = time.time()\n ftexpwf = np.fft.fftshift(scipy.fftpack.fft2(np.fft.fftshift(expwf_grid)))\n img_apod = np.abs(ftexpwf)**2\n time_end_single = time.time()\n if self.verbosity == 1:\n logging.info('Time for FFT is ' + str(time_end_single - time_start_single))\n ######################################################################\n\n # size in arcseconds of the image generated by the code\n scale_ModelImage_PFS_naturalResolution = sky_scale(\n size_of_ilum_in_units_of_radius * self.diam_sic, self.wavelength)\n self.scale_ModelImage_PFS_naturalResolution = scale_ModelImage_PFS_naturalResolution\n\n if self.save == 1:\n if socket.gethostname() == 'IapetusUSA' or socket.gethostname() == 'tiger2-sumire.princeton.edu' \\\n or socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \\\n socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':\n np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'aperilluminated', aper.illuminated)\n np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum', ilum)\n np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric', ilum_radiometric)\n np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized',\n ilum_radiometric_apodized)\n np.save(self.TESTING_PUPIL_IMAGES_FOLDER + 'ilum_radiometric_apodized_bool',\n ilum_radiometric_apodized_bool)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u_manual', u_manual)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v_manual', v_manual)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'u', u)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'v', v)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_grid', wf_grid)\n if self.use_wf_grid is None:\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full', wf_full)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full_fake_0', wf_full_fake_0)\n np.save(self.TESTING_WAVEFRONT_IMAGES_FOLDER + 'expwf_grid', expwf_grid)\n\n if self.verbosity == 1:\n logging.info('Finished with _getOptPsf_naturalResolution')\n logging.info('Finished with _getOptPsf_naturalResolution')\n logging.info(' ')\n\n if not return_intermediate_images:\n return img_apod\n if return_intermediate_images:\n return img_apod, ilum[lower_limit_of_ilum:higher_limit_of_ilum,\n lower_limit_of_ilum:higher_limit_of_ilum], wf_grid_rot\n\n\nclass LN_PFS_multi_same_spot(object):\n \"\"\"!Class 
to compute quality of the multiple donut images,\n of the same spot taken at different defocuses\n\n Calls class LN_PFS_single, for example:\n model = LN_PFS_single(sci_image,var_image,pupil_parameters = pupil_parameters,\n use_pupil_parameters=None,zmax=zmax,save=1)\n def model_return(allparameters_proposal):\n return model(allparameters_proposal,return_Image=True)\n\n Called by class Tokovinin_multi\n \"\"\"\n\n def __init__(\n self,\n list_of_sci_images,\n list_of_var_images,\n list_of_mask_images=None,\n wavelength=None,\n dithering=None,\n save=None,\n verbosity=None,\n pupil_parameters=None,\n use_pupil_parameters=None,\n use_optPSF=None,\n list_of_wf_grid=None,\n zmax=None,\n extraZernike=None,\n pupilExplicit=None,\n simulation_00=None,\n double_sources=None,\n double_sources_positions_ratios=None,\n npix=None,\n list_of_defocuses=None,\n fit_for_flux=True,\n test_run=False,\n list_of_psf_positions=None,\n use_center_of_flux=False):\n \"\"\"\n @param list_of_sci_images list of science images, list of 2d array\n @param list_of_var_images list of variance images, 2d arrays,\n which are the same size as sci_image\n @param list_of_mask_images list of mask images, 2d arrays,\n which are the same size as sci_image\n @param dithering dithering, 1=normal, 2=two times higher resolution,\n 3=not supported\n @param save save intermediate result in the process\n (set value at 1 for saving)\n @param verbosity verbosity of the process\n (set value at 1 for full output)\n\n @param pupil_parameters\n @param use_pupil_parameters\n @param use_optPSF\n\n @param zmax largest Zernike order used\n (11 or 22, or larger than 22)\n @param extraZernike array consisting of higher order zernike\n (if using higher order than 22)\n @param pupilExplicit\n\n @param simulation_00 resulting image will be centered with optical center\n in the center of the image\n and not fitted acorrding to the sci_image\n @param double_sources 1 if there are other secondary sources in the image\n @param double_sources_positions_ratios / arrray with parameters describing relative position\\\n and relative flux of the secondary source(s)\n @param npxix size of the pupil (1536 reccomended)\n @param list_of_defocuses list of defocuses at which images are taken\n (float or string?)\n\n @param fit_for_flux automatically fit for the best flux level\n that minimizes the chi**2\n @param test_run if True, skips the creation of model and\n return science image - useful for testing\n interaction of outputs of the module\n in broader setting quickly\n @param explicit_psf_position gives position of the opt_psf\n \"\"\"\n\n if verbosity is None:\n verbosity = 0\n\n if use_pupil_parameters is not None:\n assert pupil_parameters is not None\n\n # logging.info('double_sources in module: ' + str(double_sources))\n # logging.info('double_sources_positions_ratios in module: ' + str(double_sources_positions_ratios))\n # logging.info('list_of_psf_positions in LN_PFS_multi_same_spot '+str(list_of_psf_positions))\n if double_sources is not None and bool(double_sources) is not False:\n assert np.sum(np.abs(double_sources_positions_ratios)) > 0\n\n if zmax is None:\n zmax = 11\n\n if zmax == 11:\n self.columns = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_ilum_radius',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 
'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n if zmax >= 22:\n self.columns = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'z12',\n 'z13',\n 'z14',\n 'z15',\n 'z16',\n 'z17',\n 'z18',\n 'z19',\n 'z20',\n 'z21',\n 'z22',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_ilum_radius',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n\n self.list_of_sci_images = list_of_sci_images\n self.list_of_var_images = list_of_var_images\n\n if list_of_mask_images is None:\n list_of_mask_images = []\n for i in range(len(list_of_sci_images)):\n mask_image = np.zeros(list_of_sci_images[i].shape)\n list_of_mask_images.append(mask_image)\n\n self.list_of_mask_images = list_of_mask_images\n\n # self.mask_image=mask_image\n # self.sci_image=sci_image\n # self.var_image=var_image\n self.wavelength = wavelength\n self.dithering = dithering\n self.save = save\n self.pupil_parameters = pupil_parameters\n self.use_pupil_parameters = use_pupil_parameters\n self.use_optPSF = use_optPSF\n self.pupilExplicit = pupilExplicit\n self.simulation_00 = simulation_00\n self.zmax = zmax\n self.extraZernike = extraZernike\n self.verbosity = verbosity\n self.double_sources = double_sources\n self.double_sources_positions_ratios = double_sources_positions_ratios\n self.npix = npix\n self.fit_for_flux = fit_for_flux\n self.list_of_defocuses = list_of_defocuses\n self.test_run = test_run\n if list_of_psf_positions is None:\n list_of_psf_positions = [None] * len(list_of_sci_images)\n self.list_of_psf_positions = list_of_psf_positions\n if list_of_wf_grid is None:\n list_of_wf_grid = [None] * len(list_of_sci_images)\n self.list_of_wf_grid = list_of_wf_grid\n\n # self.use_only_chi=use_only_chi\n self.use_center_of_flux = use_center_of_flux\n\n def move_parametrizations_from_1d_to_2d(self, allparameters_parametrizations_1d, zmax=None):\n \"\"\"Reshape the parametrization from 1d array to 2d array\n\n Parameters\n ----------\n allparameters_parametrizations_1d : `np.array`\n Parametriztion to be reshaped\n zmax : `int`\n Highest order of Zernike parameters applied\n\n Returns\n ----------\n allparameters_parametrizations_2d : `np.array`\n Parametrization in 2d form\n \"\"\"\n\n # 22 parameters has len of 61\n if zmax is None:\n zmax = int((len(allparameters_parametrizations_1d) - 61) / 2 + 22)\n\n assert len(allparameters_parametrizations_1d.shape) == 1\n\n z_parametrizations = allparameters_parametrizations_1d[:19 * 2].reshape(19, 2)\n g_parametrizations =\\\n np.transpose(np.vstack((np.zeros(len(allparameters_parametrizations_1d[19 * 2:19 * 2 + 23])),\n allparameters_parametrizations_1d[19 * 2:19 * 2 + 23])))\n\n if zmax > 22:\n extra_Zernike_parameters_number = zmax - 22\n z_extra_parametrizations = allparameters_parametrizations_1d[19 * 2 + 23:].reshape(\n extra_Zernike_parameters_number, 2)\n\n if zmax <= 22:\n allparameters_parametrizations_2d = np.vstack((z_parametrizations, g_parametrizations))\n if zmax > 22:\n allparameters_parametrizations_2d = np.vstack(\n (z_parametrizations, g_parametrizations, z_extra_parametrizations))\n\n # logging.info('allparameters_parametrizations_2d[41]: '+ str(allparameters_parametrizations_2d[41]))\n # assert allparameters_parametrizations_2d[41][1] >= 
0.98\n # assert allparameters_parametrizations_2d[41][1] <= 1.02\n\n return allparameters_parametrizations_2d\n\n def create_list_of_allparameters(self, allparameters_parametrizations, list_of_defocuses=None, zmax=None):\n \"\"\"Create list of parameters at given defocuses\n\n Given the parametrizations (in either 1d or 2d ),\n create list_of_allparameters to be used in analysis of single images\n\n Parameters\n ----------\n allparameters_parametrizations : `np.array`\n Input parametrizations\n list_of_defocuses : `list`\n List contaning the strings of defoucses at which we are searching for parameters\n zmax : `int`\n Highest order of Zernike parameters applied\n\n Returns\n ----------\n list_of_allparameters : `list`\n List contaning the parameters for each defocus position\n \"\"\"\n\n # logging.info('allparameters_parametrizations '+str(allparameters_parametrizations))\n\n if zmax is None:\n zmax = self.zmax\n\n # if you have passed parameterization in 1d, move to 2d\n # logging.info(\"allparameters_parametrizations.type: \"+str(type(allparameters_parametrizations)))\n # logging.info(\"allparameters_parametrizations.len: \"+str(+len(allparameters_parametrizations)))\n # logging.info(\"allparameters_parametrizations.shape: \"+str(allparameters_parametrizations.shape))\n if len(allparameters_parametrizations.shape) == 1:\n allparameters_parametrizations = self.move_parametrizations_from_1d_to_2d(\n allparameters_parametrizations)\n\n list_of_allparameters = []\n\n # if this is only a single image, just return the input\n if list_of_defocuses is None:\n return allparameters_parametrizations\n else:\n list_of_defocuses_int = self.transform_list_of_defocuses_from_str_to_float(list_of_defocuses)\n # logging.info(list_of_defocuses_int)\n # go through the list of defocuses, and create the allparameters array for each defocus\n for i in range(len(list_of_defocuses)):\n list_of_allparameters.append(\n self.create_allparameters_single(\n list_of_defocuses_int[i],\n allparameters_parametrizations,\n zmax))\n\n # logging.info(list_of_allparameters)\n\n return list_of_allparameters\n\n def value_at_defocus(self, mm, a, b=None):\n \"\"\"Calculate linear fit to a value at a given defocus (in mm)\n\n Parameters\n ----------\n mm : `float`\n Slit defocus in mm\n a : `float`\n Linear parameter\n b :\n Contstant offset\n\n Returns\n ----------\n : `float`\n Result of linear fit\n \"\"\"\n\n if b is None:\n return a\n else:\n return a * mm + b\n\n def create_allparameters_single(self, mm, array_of_polyfit_1_parameterizations, zmax=None):\n \"\"\" Given the defous, transform parametrization into parameters for that defocus\n\n This function ransforms 1d array of ``parametrizations'' into ``parameters''m i.e.,\n into form acceptable for creating single images.\n This is a workhorse function used by function create_list_of_allparameters\n\n Parameters\n ----------\n mm : `float`\n defocus of the slit\n array_of_polyfit_1_parameterizations : `np.array`\n parametrization for linear fit for the parameters as a function of focus\n zmax : `int`\n Highest order of Zernike parameters applied\n\n Returns\n ----------\n allparameters_proposal_single : `np.array`\n Parameters that can be used to create single image\n \"\"\"\n\n if zmax is None:\n # if len is 42, the zmax is 22\n zmax = array_of_polyfit_1_parameterizations.shape[0] - 42 + 22\n if zmax > 22:\n extra_Zernike_parameters_number = zmax - 22\n else:\n extra_Zernike_parameters_number = zmax - 22\n\n # for single case, up to z11\n if zmax == 11:\n 
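# In both branches below, each Zernike row of the parametrization holds a (slope,\n            # intercept) pair of a linear fit in defocus, evaluated via value_at_defocus as\n            # a * mm + b; e.g. a hypothetical row [0.5, 1.0] at mm = -4 gives\n            # 0.5 * (-4) + 1.0 = -1.0. Global (non-Zernike) parameters use only the\n            # intercept column.\n            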
z_parametrizations = array_of_polyfit_1_parameterizations[:8]\n g_parametrizations = array_of_polyfit_1_parameterizations[8:]\n\n allparameters_proposal_single = np.zeros((8 + len(g_parametrizations)))\n\n for i in range(0, 8, 1):\n allparameters_proposal_single[i] = self.value_at_defocus(\n mm, z_parametrizations[i][0], z_parametrizations[i][1])\n\n for i in range(len(g_parametrizations)):\n allparameters_proposal_single[i + 8] = g_parametrizations[i][1]\n\n if zmax >= 22:\n z_parametrizations = array_of_polyfit_1_parameterizations[:19]\n g_parametrizations = array_of_polyfit_1_parameterizations[19:19 + 23]\n\n if extra_Zernike_parameters_number > 0:\n z_extra_parametrizations = array_of_polyfit_1_parameterizations[42:]\n\n allparameters_proposal_single = np.zeros(\n (19 + len(g_parametrizations) + extra_Zernike_parameters_number))\n\n for i in range(0, 19, 1):\n # logging.info(str([i,mm,z_parametrizations[i]]))\n allparameters_proposal_single[i] = self.value_at_defocus(\n mm, z_parametrizations[i][0], z_parametrizations[i][1])\n\n for i in range(len(g_parametrizations)):\n allparameters_proposal_single[19 + i] = g_parametrizations[i][1]\n\n for i in range(0, extra_Zernike_parameters_number, 1):\n # logging.info(str([i,mm,z_parametrizations[i]]))\n allparameters_proposal_single[19 + len(g_parametrizations) + i] = self.value_at_defocus(\n mm, z_extra_parametrizations[i][0], z_extra_parametrizations[i][1])\n\n return allparameters_proposal_single\n\n def transform_list_of_defocuses_from_str_to_float(self, list_of_defocuses):\n \"\"\"Transfroms list_of_defocuses from strings to float values\n\n Parameters\n ----------\n list_of_defocuses : `list`\n list of defocuses in string form (e.g., [m4,m25,0,p15,p4])\n\n Returns\n ----------\n list_of_defocuses_float : `list`\n list of defocuses in float form\n \"\"\"\n\n list_of_defocuses_float = []\n for i in range(len(list_of_defocuses)):\n if list_of_defocuses[i][0] == '0':\n list_of_defocuses_float.append(0)\n else:\n if list_of_defocuses[i][0] == 'm':\n sign = -1\n if list_of_defocuses[i][0] == 'p':\n sign = +1\n if len(list_of_defocuses[i]) == 2:\n list_of_defocuses_float.append(sign * float(list_of_defocuses[i][1:]))\n else:\n list_of_defocuses_float.append(sign * float(list_of_defocuses[i][1:]) / 10)\n\n return list_of_defocuses_float\n\n def create_resonable_allparameters_parametrizations(\n self,\n array_of_allparameters,\n list_of_defocuses_input,\n zmax,\n remove_last_n=None):\n \"\"\"Create ``parametrizations'' from list of ``parameters'' and defocuses\n\n Given parameters for single defocus images and their defocuses,\n create parameterizations (1d functions) for multi-image linear fit across various defocuses\n This is the inverse of function `create_list_of_allparameters`\n\n Parameters\n ----------\n array_of_allparameters : `np.array`\n Array with parameters of defocus, 2d array with shape\n [n(list_of_defocuses),number of parameters]\n list_of_defocuses_input : `list`\n List of strings at which defocuses are the data\n from array_of_allparameters\n zmax : `int`\n Highest order of Zernike parameters applied\n remove_last_n : `int`\n Do not do the fit for the last 'n' parameters\n If not specified, it defaults to 2\n\n Returns\n ----------\n array_of_polyfit_1_parameterizations : `np.array`\n Array contaning output 1d ``parameterizations\n \"\"\"\n\n if remove_last_n is None:\n remove_last_n = 2\n\n list_of_defocuses_int = self.transform_list_of_defocuses_from_str_to_float(list_of_defocuses_input)\n if remove_last_n > 0:\n 
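# Drop the trailing remove_last_n columns before fitting; with the default of 2, and\n            # assuming the input columns are ordered as in self.columns, these would be the\n            # 'fiber_r' and 'flux' entries (an assumption about the caller, noted for clarity).\n            # Each remaining parameter is then fit with a degree-1 np.polyfit across the\n            # supplied defocuses.\n            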
array_of_allparameters = array_of_allparameters[:, :-remove_last_n]\n\n if zmax <= 22:\n len_of_iterations = array_of_allparameters.shape[1]\n else:\n len_of_iterations = 42 + zmax - 22\n\n list_of_polyfit_1_parameter = []\n for i in range(len_of_iterations):\n # logging.info([i,array_of_allparameters.shape[1]])\n if i < array_of_allparameters.shape[1]:\n # logging.info('i'+str(i)+' '+str(array_of_allparameters[:,i]))\n polyfit_1_parameter = np.polyfit(\n x=list_of_defocuses_int, y=array_of_allparameters[:, i], deg=1)\n else:\n # logging.info('i'+str(i)+' '+'None')\n # if you have no input for such high level of Zernike, set it at zero\n polyfit_1_parameter = np.array([0, 0])\n\n # logging.info('i_polyfit'+str(i)+' '+str(polyfit_1_parameter))\n list_of_polyfit_1_parameter.append(polyfit_1_parameter)\n\n array_of_polyfit_1_parameterizations = np.array(list_of_polyfit_1_parameter)\n\n # list_of_defocuses_output_int=self.transform_list_of_defocuses_from_str_to_float(list_of_defocuses_input)\n # list_of_allparameters=[]\n # for i in list_of_defocuses_output_int:\n # allparameters_proposal_single=self.create_allparameters_single(i,array_of_polyfit_1_parameterizations,zmax=self.zmax)\n # list_of_allparameters.append(allparameters_proposal_single)\n\n return array_of_polyfit_1_parameterizations\n\n def lnlike_Neven_multi_same_spot(self, list_of_allparameters_input, return_Images=False,\n use_only_chi=False, multi_background_factor=3):\n \"\"\"Create model images and estimate their quality\n\n Creates model images, and compares them to supplied data\n\n Parameters\n ----------\n list_of_allparameters_input : `list`\n List of parameteres to create image at each defocus\n return_Images : `bool`\n If True, return all the created images and auxiliary data\n use_only_chi : `bool`\n If True, use chi as the quality measure\n If False, use chi**2 as the quality measure\n multi_background_factor : `int`\n Only consider pixels with flux above this factor * background level\n\n Returns\n ----------\n (if return_Images is False):\n mean_res_of_multi_same_spot : `float`\n Mean quality of all images\n (if return_Images is True):\n mean_res_of_multi_same_spot [index 0] : `float`\n Mean quality of all images\n list_of_single_res [index 1] : `list`\n Quality per image\n list_of_single_model_image [index 2] : `list`\n List of created model images\n list_of_single_allparameters [index 3] : `list`\n List of parameters per image\n list_of_single_chi_results [index 4] : `list`\n List of arrays describing quality of fitting\n Each of these array contains\n 0. chi2_max value, 1. Qvalue, 2. (chi or chi2)/d.o.f., 3. 
(chi2 or chi2_max)/d.o.f.\n array_of_psf_positions_output [index 5] : `np.array`\n Array showing the centering of images\n \"\"\"\n self.use_only_chi = use_only_chi\n\n list_of_single_res = []\n if return_Images:\n list_of_single_model_image = []\n list_of_single_allparameters = []\n list_of_single_chi_results = []\n\n if len(self.list_of_sci_images) == len(list_of_allparameters_input):\n list_of_allparameters = np.copy(list_of_allparameters_input)\n\n else:\n allparametrization = list_of_allparameters_input\n\n # logging.info('self.list_of_defocuses: ' + str(self.list_of_defocuses))\n # logging.info('allparametrization.type: ' + str(allparametrization.type))\n list_of_allparameters = self.create_list_of_allparameters(\n allparametrization, list_of_defocuses=self.list_of_defocuses)\n\n if self.verbosity == 1:\n logging.info('Starting LN_PFS_multi_same_spot for parameters-hash '\n + str(hash(str(allparametrization.data)))\n + ' at ' + str(time.time()) + ' in thread '\n + str(threading.get_ident()))\n\n assert len(self.list_of_sci_images) == len(list_of_allparameters)\n\n # logging.info(len(self.list_of_sci_images))\n # logging.info(len(list_of_allparameters))\n\n # use same weights, experiment\n # if use_only_chi==True:\n # renormalization_of_var_sum=np.ones((len(self.list_of_sci_images)))*len(self.list_of_sci_images)\n # central_index=int(len(self.list_of_sci_images)/2)\n # renormalization_of_var_sum[central_index]=1\n\n # else:\n\n # find image with lowest variance - pressumably the one in focus\n # array_of_var_sum=np.array(list(map(np.sum,self.list_of_var_images)))\n # index_of_max_var_sum=np.where(array_of_var_sum==np.min(array_of_var_sum))[0][0]\n # find what variance selectes top 20% of pixels\n # this is done to weight more the images in focus and less the image out of focus in the\n # final likelihood result\n # quantile_08_focus=np.quantile(self.list_of_sci_images[index_of_max_var_sum],0.8)\n\n list_of_var_sums = []\n for i in range(len(list_of_allparameters)):\n # taking from create_chi_2_almost function in LN_PFS_single\n\n mask_image = self.list_of_mask_images[i]\n var_image = self.list_of_var_images[i]\n sci_image = self.list_of_sci_images[i]\n # array that has True for values which are good and False for bad values\n inverted_mask = ~mask_image.astype(bool)\n\n try:\n if sci_image.shape[0] == 20:\n multi_background_factor = 3\n\n # logging.info('var_image.shape: '+str(var_image.shape))\n # logging.info('multi_background_factor: '+str(multi_background_factor))\n # logging.info('np.median(var_image[0]): '+str(np.median(var_image[0])))\n # logging.info('np.median(var_image[-1]): '+str(np.median(var_image[-1])))\n # logging.info('np.median(var_image[:,0]): '+str(np.median(var_image[:,0])))\n # logging.info('np.median(var_image[:,-1]): '+str(np.median(var_image[:,-1])))\n mean_value_of_background_via_var = np.mean([np.median(var_image[0]), np.median(\n var_image[-1]), np.median(var_image[:, 0]),\n np.median(var_image[:, -1])]) * multi_background_factor\n # logging.info('mean_value_of_background_via_var: '+str(mean_value_of_background_via_var))\n\n mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]), np.median(\n sci_image[-1]), np.median(sci_image[:, 0]),\n np.median(sci_image[:, -1])]) * multi_background_factor\n # logging.info('mean_value_of_background_via_sci: '+str(mean_value_of_background_via_sci))\n mean_value_of_background = np.max(\n [mean_value_of_background_via_var, mean_value_of_background_via_sci])\n except BaseException:\n pass\n\n # select only images 
with above 80% percentile of the image with max variance?\n var_image_masked = var_image * inverted_mask\n var_image_masked_without_nan = var_image_masked.ravel()[\n var_image_masked.ravel() > mean_value_of_background]\n\n if use_only_chi:\n # if you level is too high\n if len(var_image_masked_without_nan) == 0:\n var_sum = -1\n else:\n # var_sum=-(1)*(np.sum(np.sqrt(np.abs(var_image_masked_without_nan))))\n var_sum = -1\n\n else:\n\n # if you level is too high\n if len(var_image_masked_without_nan) == 0:\n var_sum = -(1)\n else:\n var_sum = -(1) * (np.mean(np.abs(var_image_masked_without_nan)))\n list_of_var_sums.append(var_sum)\n\n # renormalization needs to be reconsidered?\n array_of_var_sum = np.array(list_of_var_sums)\n max_of_array_of_var_sum = np.max(array_of_var_sum)\n\n renormalization_of_var_sum = array_of_var_sum / max_of_array_of_var_sum\n # logging.info('renormalization_of_var_sum'+str(renormalization_of_var_sum))\n list_of_psf_positions_output = []\n\n for i in range(len(list_of_allparameters)):\n\n # if image is in focus which at this point is the size of image with 20\n\n if (self.list_of_sci_images[i].shape)[0] == 20:\n if self.use_center_of_flux:\n use_center_of_flux = True\n else:\n use_center_of_flux = False\n else:\n use_center_of_flux = False\n\n if self.verbosity == 1:\n logging.info('################################')\n logging.info('analyzing image ' + str(i + 1) + ' out of ' + str(len(list_of_allparameters)))\n logging.info(' ')\n\n # if this is the first image, do the full analysis, generate new pupil and illumination\n if i == 0:\n model_single = LN_PFS_single(\n self.list_of_sci_images[i],\n self.list_of_var_images[i],\n self.list_of_mask_images[i],\n wavelength=self.wavelength,\n dithering=self.dithering,\n save=self.save,\n verbosity=self.verbosity,\n pupil_parameters=self.pupil_parameters,\n use_pupil_parameters=self.use_pupil_parameters,\n use_optPSF=self.use_optPSF,\n use_wf_grid=self.list_of_wf_grid[i],\n zmax=self.zmax,\n extraZernike=self.extraZernike,\n pupilExplicit=self.pupilExplicit,\n simulation_00=self.simulation_00,\n double_sources=self.double_sources,\n double_sources_positions_ratios=self.double_sources_positions_ratios,\n npix=self.npix,\n fit_for_flux=self.fit_for_flux,\n test_run=self.test_run,\n explicit_psf_position=self.list_of_psf_positions[i],\n use_only_chi=self.use_only_chi,\n use_center_of_flux=use_center_of_flux)\n\n res_single_with_intermediate_images = model_single(\n list_of_allparameters[i],\n return_Image=True,\n return_intermediate_images=True,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n\n if res_single_with_intermediate_images == -np.inf:\n return -np.inf\n if isinstance(res_single_with_intermediate_images, tuple):\n if res_single_with_intermediate_images[0] == -np.inf:\n return -np.inf\n likelihood_result = res_single_with_intermediate_images[0]\n model_image = res_single_with_intermediate_images[1]\n allparameters = res_single_with_intermediate_images[2]\n pupil_explicit_0 = res_single_with_intermediate_images[3]\n # wf_grid_rot = res_single_with_intermediate_images[4]\n chi_results = res_single_with_intermediate_images[5]\n psf_position = res_single_with_intermediate_images[6]\n\n list_of_single_res.append(likelihood_result)\n list_of_psf_positions_output.append(psf_position)\n if return_Images:\n list_of_single_model_image.append(model_image)\n list_of_single_allparameters.append(allparameters)\n list_of_single_chi_results.append(chi_results)\n\n # and if this is not the first 
image, use the pupil and illumination used in the first image\n else:\n\n model_single = LN_PFS_single(\n self.list_of_sci_images[i],\n self.list_of_var_images[i],\n self.list_of_mask_images[i],\n wavelength=self.wavelength,\n dithering=self.dithering,\n save=self.save,\n verbosity=self.verbosity,\n pupil_parameters=self.pupil_parameters,\n use_pupil_parameters=self.use_pupil_parameters,\n use_optPSF=self.use_optPSF,\n use_wf_grid=self.list_of_wf_grid[i],\n zmax=self.zmax,\n extraZernike=self.extraZernike,\n pupilExplicit=pupil_explicit_0,\n simulation_00=self.simulation_00,\n double_sources=self.double_sources,\n double_sources_positions_ratios=self.double_sources_positions_ratios,\n npix=self.npix,\n fit_for_flux=self.fit_for_flux,\n test_run=self.test_run,\n explicit_psf_position=self.list_of_psf_positions[i],\n use_only_chi=self.use_only_chi,\n use_center_of_flux=use_center_of_flux)\n if not return_Images:\n res_single_without_intermediate_images = model_single(\n list_of_allparameters[i],\n return_Image=return_Images,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n\n likelihood_result = res_single_without_intermediate_images[0]\n psf_position = res_single_with_intermediate_images[-1]\n # logging.info(likelihood_result)\n list_of_single_res.append(likelihood_result)\n list_of_psf_positions_output.append(psf_position)\n\n if return_Images:\n res_single_with_an_image = model_single(\n list_of_allparameters[i], return_Image=return_Images, use_only_chi=use_only_chi)\n if res_single_with_an_image == -np.inf:\n return -np.inf\n likelihood_result = res_single_with_an_image[0]\n model_image = res_single_with_an_image[1]\n allparameters = res_single_with_an_image[2]\n chi_results = res_single_with_an_image[3]\n psf_position = res_single_with_an_image[-1]\n\n list_of_single_res.append(likelihood_result)\n list_of_single_model_image.append(model_image)\n list_of_single_allparameters.append(allparameters)\n list_of_single_chi_results.append(chi_results)\n list_of_psf_positions_output.append(psf_position)\n # possibly implement intermediate images here\n array_of_single_res = np.array(list_of_single_res)\n array_of_psf_positions_output = np.array(list_of_psf_positions_output)\n\n # renormalization\n if self.verbosity == 1:\n logging.info('################################')\n logging.info('Likelihoods returned per individual images are: ' + str(array_of_single_res))\n logging.info('Mean likelihood is ' + str(np.mean(array_of_single_res)))\n\n # mean_res_of_multi_same_spot=np.mean(array_of_single_res)\n mean_res_of_multi_same_spot = np.mean(array_of_single_res / renormalization_of_var_sum)\n\n if self.verbosity == 1:\n logging.info('################################')\n logging.info('Renormalized likelihoods returned per individual images are: '\n + str(array_of_single_res / renormalization_of_var_sum))\n logging.info('Renormalization factors are: ' + str(renormalization_of_var_sum))\n logging.info('Mean renormalized likelihood is ' + str(mean_res_of_multi_same_spot))\n logging.info('array_of_psf_positions_output: ' + str(array_of_psf_positions_output))\n\n if self.verbosity == 1:\n # logging.info('Ending LN_PFS_multi_same_spot for parameters-hash '+\n # str(hash(str(allparametrization.data)))+' at '+str(time.time())+\n # ' in thread '+str(threading.get_ident()))\n logging.info('Ending LN_PFS_multi_same_spot at time '\n + str(time.time()) + ' in thread ' + str(threading.get_ident()))\n logging.info(' ')\n\n if not return_Images:\n return mean_res_of_multi_same_spot\n 
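# Note on the renormalized mean computed above: each per-image likelihood is divided by\n        # its entry in renormalization_of_var_sum (array_of_var_sum / np.max(array_of_var_sum)),\n        # which in effect down-weights images with larger background-clipped variance,\n        # consistent with the commented-out notes above about weighting the in-focus image\n        # more heavily. Hypothetical illustration: variance sums of [-8, -2] give factors\n        # [4, 1]; with array_of_single_res = [-400, -100] the renormalized values are\n        # [-100, -100] and the mean is -100.\n        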
if return_Images:\n # 0. mean_res_of_multi_same_spot - mean likelihood per images, renormalized\n # 1. list_of_single_res - likelihood per image, not renormalized\n # 2. list_of_single_model_image - list of created model images\n # 3. list_of_single_allparameters - list of parameters per image?\n # 4. list_of_single_chi_results - list of arrays describing quality of fitting\n # 1. chi2_max value, 2. Qvalue, 3. chi2/d.o.f., 4. chi2_max/d.o.f.\n # 5. array_of_psf_positions_output - list showing the centering of images\n\n return mean_res_of_multi_same_spot, list_of_single_res, list_of_single_model_image,\\\n list_of_single_allparameters, list_of_single_chi_results, array_of_psf_positions_output\n\n def __call__(\n self,\n list_of_allparameters,\n return_Images=False,\n use_only_chi=False,\n multi_background_factor=3):\n\n return self.lnlike_Neven_multi_same_spot(list_of_allparameters, return_Images=return_Images,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n\n\nclass Tokovinin_multi(object):\n\n \"\"\"\n\n # improvments possible - modify by how much to move parameters based on the previous step\n # in simplied H, take new model into account where doing changes\n\n\n outputs:\n initial_model_result,final_model_result,\\\n list_of_initial_model_result,list_of_final_model_result,\\\n out_images, pre_images, list_of_image_final,\\\n allparameters_parametrization_proposal, allparameters_parametrization_proposal_after_iteration,\\\n list_of_initial_input_parameters, list_of_finalinput_parameters,\\\n list_of_pre_chi2,list_of_after_chi2,\\\n list_of_psf_positions,list_of_final_psf_positions,\\\n [uber_images_normalized,uber_M0_std,H_std,array_of_delta_z_parametrizations_None,list_of_final_psf_positions]\n\n explanation of the results:\n 0. likelihood averaged over all images (before the function)\n 1. likelihood averaged over all images (after the function)\n 2. likelihood per image (output from model_multi) (before the function)\n 3. likelihood per image (output from model_multi) (after the function)\n 4. out_images\n 5. list of initial model images\n 6. list of final model images\n 7. parametrization before the function\n 8. parametrization after the function\n 9. list of parameters per image (before the function)\n 10. list of parameters per image (after the function)\n 11. list of chi2 per image (before the function)\n 12. list of chi2 per image (after the function)\n 13. list of psf position of image (function the function)\n 14. list of psf position of image (after the function)\n\n 15. [uber_images_normalized,uber_M0,H,array_of_delta_z_parametrizations_None,list_of_final_psf_positions]\n 15.0. uber_images_normalized\n 15.1. uber_M0\n 15.2. H\n 15.3. array_of_delta_z_parametrizations_None\n 15.4. 
list_of_final_psf_positions\n\n\n\n \"\"\"\n\n def __init__(self, list_of_sci_images, list_of_var_images, list_of_mask_images=None,\n wavelength=None, dithering=None, save=None, verbosity=None,\n pupil_parameters=None, use_pupil_parameters=None, use_optPSF=None, list_of_wf_grid=None,\n zmax=None, extraZernike=None, pupilExplicit=None, simulation_00=None,\n double_sources=None, double_sources_positions_ratios=None, npix=None,\n list_of_defocuses=None, fit_for_flux=True, test_run=False, list_of_psf_positions=None,\n num_iter=None, move_allparameters=None, pool=None):\n \"\"\"\n @param list_of_sci_images list of science images, list of 2d array\n @param list_of_var_images list of variance images, 2d arrays,\n which are the same size as sci_image\n @param list_of_mask_images list of mask images, 2d arrays,\n which are the same size as sci_image\n @param wavelength wavelength in nm, to be passed to to module\n @param dithering dithering, 1=normal, 2=two times higher resolution,\n 3=not supported\n @param save save intermediate result in the process\n (set value at 1 for saving)\n @param verbosity verbosity of the process\n (set value at 2 for full output,\n 1 only in Tokovinin, 0==nothing)\n\n @param pupil_parameters\n @param use_pupil_parameters\n @param use_optPSF\n\n @param zmax largest Zernike order used (11 or 22, or larger than 22)\n @param extraZernike array consisting of higher order zernike\n (if using higher order than 22)\n @param pupilExplicit\n\n @param simulation_00 resulting image will be centered with optical center\n in the center of the image\n and not fitted acorrding to the sci_image\n @param double_sources 1 if there are other secondary sources in the image\n @param double_sources_positions_ratios / arrray with parameters describing relative position\\\n and relative flux of the secondary source(s)\n @param npxix size of the pupil (1536 reccomended)\n @param list_of_defocuses list of defocuses at which images are taken\n (float or string?)\n\n @param fit_for_flux automatically fit for the best flux level\n that minimizes the chi**2\n @param test_run if True, skips the creation of model and\n return science image - useful for testing\n interaction of outputs of the module\n in broader setting quickly\n\n @param list_of_psf_positions gives position of the opt_psf\n @param num_iter number of iteration\n @param move_allparameters if True change all parameters i.e.,\n also ``global'' parameters, i.e.,\n not just wavefront parameters\n @param pool pass pool of workers to calculate\n\n array of changes due to movement due to wavefront changes\n \"\"\"\n\n if verbosity is None:\n verbosity = 0\n\n if use_pupil_parameters is not None:\n assert pupil_parameters is not None\n\n if double_sources is not None and double_sources is not False:\n assert np.sum(np.abs(double_sources_positions_ratios)) > 0\n\n if zmax is None:\n zmax = 22\n\n if zmax == 11:\n self.columns = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_ilum_radius',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n if zmax >= 22:\n self.columns = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'z12',\n 'z13',\n 'z14',\n 'z15',\n 'z16',\n 'z17',\n 'z18',\n 'z19',\n 'z20',\n 
'z21',\n 'z22',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_ilum_radius',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n\n self.list_of_sci_images = list_of_sci_images\n self.list_of_var_images = list_of_var_images\n\n if list_of_mask_images is None:\n list_of_mask_images = []\n for i in range(len(list_of_sci_images)):\n mask_image = np.zeros(list_of_sci_images[i].shape)\n list_of_mask_images.append(mask_image)\n\n self.list_of_mask_images = list_of_mask_images\n\n # implement custom variance image here\n\n # self.mask_image=mask_image\n # self.sci_image=sci_image\n # self.var_image=var_image\n self.wavelength = wavelength\n self.dithering = dithering\n self.save = save\n self.pupil_parameters = pupil_parameters\n self.use_pupil_parameters = use_pupil_parameters\n self.use_optPSF = use_optPSF\n self.pupilExplicit = pupilExplicit\n self.simulation_00 = simulation_00\n self.zmax = zmax\n self.extraZernike = extraZernike\n self.verbosity = verbosity\n self.double_sources = double_sources\n self.double_sources_positions_ratios = double_sources_positions_ratios\n self.npix = npix\n self.fit_for_flux = fit_for_flux\n self.list_of_defocuses = list_of_defocuses\n self.test_run = test_run\n if list_of_psf_positions is None:\n list_of_psf_positions = [None] * len(list_of_sci_images)\n self.list_of_psf_positions = list_of_psf_positions\n if list_of_wf_grid is None:\n list_of_wf_grid = [None] * len(list_of_sci_images)\n self.list_of_wf_grid = list_of_wf_grid\n self.list_of_defocuses = list_of_defocuses\n self.move_allparameters = move_allparameters\n self.num_iter = num_iter\n self.pool = pool\n\n if self.verbosity >= 1:\n self.verbosity_model = self.verbosity - 1\n else:\n self.verbosity_model = self.verbosity\n\n # parameter that control if the intermediate outputs are saved to the hard disk\n save = False\n self.save = save\n\n def Tokovinin_algorithm_chi_multi(self, allparameters_parametrization_proposal,\n return_Images=False, num_iter=None, previous_best_result=None,\n use_only_chi=False, multi_background_factor=3, up_to_which_z=None):\n \"\"\" Apply Tokovinin algorithm to a set of images\n\n Parameters\n ----------\n allparameters_parametrization_proposal : `np.array`\n 2d parametrization of variables\n return_Images : `bool`\n if True, also return created images\n num_iter : `int`\n number of iteration, used when creating save files\n previous_best_result : `np.array?`\n output from previous Tokovinin run\n use_only_chi : `bool`\n if True, optimize using chi, not chi**2\n multi_background_factor : `float`\n take into account only pixels with flux this many times above the background\n\n Returns\n ----------\n (if return_Images == False)\n final_model_result : `float`\n averaged ``likelihood'' over all images\n\n (if return_Images == True AND previous_best_result is None )\n initial_model_result :\n explanation\n final_model_result : `float`\n output with index 0 from model_multi - averaged ``likelihood'' over all input images\n if the proposed images has worse quality then input, reproduce the input value\n list_of_initial_model_result :\n explanation\n list_of_final_model_result :\n explanation\n allparameters_parametrization_proposal :\n explanation\n allparameters_parametrization_proposal_after_iteration :\n explanation\n 
list_of_initial_input_parameters :\n explanation\n list_of_finalinput_parameters :\n explanation\n list_of_pre_chi2 :\n explanation\n list_of_after_chi2 :\n explanation\n list_of_psf_positions :\n explanation\n list_of_final_psf_positions :\n explanation\n [uber_images_normalized, uber_M0_std, H_std,\n array_of_delta_z_parametrizations_None, list_of_final_psf_positions]:\n explanation\n\n (if return_Images == True AND previous_best_result is avaliable )\n\n\n\n\n\n \"\"\"\n\n if self.verbosity >= 1:\n logging.info('###############################################################################')\n logging.info('###############################################################################')\n logging.info('Starting Tokovinin_algorithm_chi_multi with num_iter: ' + str(num_iter))\n logging.info('Tokovinin, return_Images: ' + str(return_Images))\n logging.info('Tokovinin, num_iter: ' + str(num_iter))\n logging.info('Tokovinin, use_only_chi: ' + str(use_only_chi))\n logging.info('Tokovinin, multi_background_factor: ' + str(multi_background_factor))\n\n logging.info('allparameters_parametrization_proposal'\n + str(allparameters_parametrization_proposal))\n logging.info('allparameters_parametrization_proposal.shape'\n + str(allparameters_parametrization_proposal.shape))\n\n list_of_sci_images = self.list_of_sci_images\n list_of_var_images = self.list_of_var_images\n list_of_mask_images = self.list_of_mask_images\n\n double_sources_positions_ratios = self.double_sources_positions_ratios\n list_of_defocuses_input_long = self.list_of_defocuses\n\n if num_iter is None:\n if self.num_iter is not None:\n num_iter = self.num_iter\n\n move_allparameters = self.move_allparameters\n\n # if you passed previous best result, set the list_of_explicit_psf_positions\n # by default it is put as the last element in the last cell in the previous_best_result output\n if previous_best_result is not None:\n # to be compatible with versions before 0.45\n if len(previous_best_result) == 5:\n self.list_of_psf_positions = previous_best_result[-1]\n else:\n self.list_of_psf_positions = previous_best_result[-1][-1]\n\n ##########################################################################\n # Create initial modeling as basis for future effort\n # the outputs of this section are 0. pre_model_result, 1. model_results, 2. pre_images,\n # 3. pre_input_parameters, 4. chi_2_before_iteration_array, 5. 
list_of_psf_positions\n if self.verbosity >= 1:\n logging.info('list_of_defocuses analyzed: ' + str(list_of_defocuses_input_long))\n\n # logging.info('list_of_sci_images'+str(list_of_sci_images))\n # logging.info('list_of_var_images'+str(list_of_var_images))\n # logging.info('list_of_mask_images'+str(list_of_mask_images))\n # logging.info('wavelength'+str(self.wavelength))\n # logging.info('dithering'+str(self.dithering))\n # logging.info('self.save'+str(self.save))\n # logging.info('self.zmax'+str(self.zmax))\n # logging.info('self.double_sources'+str(self.double_sources))\n # logging.info('self.double_sources_positions_ratios'+str(self.double_sources_positions_ratios))\n # logging.info('self.npix'+str(self.npix))\n # logging.info('self.list_of_defocuses_input_long'+str(list_of_defocuses_input_long))\n # logging.info('self.fit_for_flux'+str(self.fit_for_flux))\n # logging.info('self.test_run'+str(self.test_run))\n # logging.info('self.list_of_psf_positions'+str(self.list_of_psf_positions))\n\n model_multi = LN_PFS_multi_same_spot(\n list_of_sci_images,\n list_of_var_images,\n list_of_mask_images=list_of_mask_images,\n wavelength=self.wavelength,\n dithering=self.dithering,\n save=self.save,\n zmax=self.zmax,\n verbosity=self.verbosity_model,\n double_sources=self.double_sources,\n double_sources_positions_ratios=self.double_sources_positions_ratios,\n npix=self.npix,\n list_of_defocuses=list_of_defocuses_input_long,\n fit_for_flux=self.fit_for_flux,\n test_run=self.test_run,\n list_of_psf_positions=self.list_of_psf_positions)\n\n if self.verbosity >= 1:\n logging.info('****************************')\n logging.info('Starting Tokovinin procedure with num_iter: ' + str(num_iter))\n logging.info('Initial testing proposal is: ' + str(allparameters_parametrization_proposal))\n time_start_single = time.time()\n\n # create list of minchains, one per each image\n list_of_minchain = model_multi.create_list_of_allparameters(\n allparameters_parametrization_proposal,\n list_of_defocuses=list_of_defocuses_input_long,\n zmax=self.zmax)\n\n # if the parametrization is 2d array, move it into 1d shape\n if len(allparameters_parametrization_proposal.shape) == 2:\n allparameters_parametrization_proposal = move_parametrizations_from_2d_shape_to_1d_shape(\n allparameters_parametrization_proposal)\n\n if self.verbosity >= 1:\n logging.info('Starting premodel analysis with num_iter: ' + str(num_iter))\n\n # results from initial run, before running fitting algorithm\n # pre_model_result - mean likelihood across all images, renormalized\n # model_results - likelihood per image, not renormalized\n # pre_images - list of created model images\n # pre_input_parameters - list of parameters per image?\n # chi_2_before_iteration_array - list of lists describing quality of fitting\n # list_of_psf_positions -?\n try:\n # logging.info('len(list_of_minchain): '+str(len(list_of_minchain)))\n # logging.info('list_of_minchain[0] '+str(list_of_minchain[0]))\n # logging.info('multi_background_factor: '+str(multi_background_factor))\n # logging.info('type'+str(type(multi_background_factor)))\n # logging.info('up_to_which_z: '+str(up_to_which_z))\n # logging.info(str( list_of_minchain))\n # logging.info('use_only_chi: '+str( use_only_chi))\n # logging.info('list_of_minchain: '+str( list_of_minchain))\n\n pre_model_result, model_results, pre_images, pre_input_parameters, chi_2_before_iteration_array,\\\n list_of_psf_positions =\\\n model_multi(list_of_minchain, return_Images=True, use_only_chi=use_only_chi,\n 
multi_background_factor=multi_background_factor)\n # modify variance image according to the models that have just been created\n # first time modifying variance image\n list_of_single_model_image = pre_images\n list_of_var_images_via_model = []\n for index_of_single_image in range(len(list_of_sci_images)):\n popt = create_popt_for_custom_var(self.list_of_sci_images[index_of_single_image],\n self.list_of_var_images[index_of_single_image],\n self.list_of_mask_images[index_of_single_image])\n single_var_image_via_model =\\\n create_custom_var_from_popt(list_of_single_model_image[index_of_single_image], popt)\n list_of_var_images_via_model.append(single_var_image_via_model)\n\n # replace the variance images provided with these custom variance images\n list_of_var_images = list_of_var_images_via_model\n # self.list_of_var_images = list_of_var_images\n\n except Exception as e:\n logging.info('Exception is: ' + str(e))\n logging.info('Exception type is: ' + str(repr(e)))\n logging.info(traceback.format_exc())\n if self.verbosity >= 1:\n logging.info('Premodel analysis failed')\n # if the modelling failed\n # return placeholder (nan) values so the output shape is consistent with what would be\n # returned if the algorithm passed\n # at position 0 return an extremely low likelihood to indicate failure\n # at position 3 return the input parametrization\n # return -9999999,np.nan,np.nan,allparameters_parametrization_proposal,np.nan,np.nan,np.nan\n return -9999999, -9999999, np.nan, np.nan, np.nan, np.nan, np.nan,\\\n allparameters_parametrization_proposal, allparameters_parametrization_proposal,\\\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan\n\n if self.verbosity >= 1:\n logging.info('list_of_psf_positions at the input stage: ' + str(np.array(list_of_psf_positions)))\n\n if self.save:\n np.save('/tigress/ncaplar/Results/allparameters_parametrization_proposal_' + str(num_iter),\n allparameters_parametrization_proposal)\n np.save('/tigress/ncaplar/Results/pre_images_' + str(num_iter),\n pre_images)\n np.save('/tigress/ncaplar/Results/pre_input_parameters_' + str(num_iter),\n pre_input_parameters)\n np.save('/tigress/ncaplar/Results/list_of_sci_images_' + str(num_iter),\n list_of_sci_images)\n np.save('/tigress/ncaplar/Results/list_of_var_images_' + str(num_iter),\n list_of_var_images)\n np.save('/tigress/ncaplar/Results/list_of_mask_images_' + str(num_iter),\n list_of_mask_images)\n\n # extract the parameters which will not change in this function, i.e., non-wavefront parameters\n nonwavefront_par = list_of_minchain[0][19:42]\n time_end_single = time.time()\n if self.verbosity >= 1:\n logging.info('Total time taken for premodel analysis with num_iter ' + str(num_iter)\n + ' was ' + str(time_end_single - time_start_single) + ' seconds')\n logging.info('chi_2_before_iteration is: ' + str(chi_2_before_iteration_array))\n\n logging.info('Ended premodel analysis ')\n logging.info('***********************')\n\n # import science images and determine the flux mask\n list_of_mean_value_of_background = []\n list_of_flux_mask = []\n list_of_sci_image_std = []\n for i in range(len(list_of_sci_images)):\n sci_image = list_of_sci_images[i]\n var_image = list_of_var_images[i]\n\n # do not use this for images in focus or near focus\n # probably needs to be done better than via shape measurement\n if sci_image.shape[0] == 20:\n multi_background_factor = 3\n\n mean_value_of_background_via_var = np.mean([np.median(var_image[0]), np.median(\n var_image[-1]), np.median(var_image[:, 0]),\n np.median(var_image[:, -1])]) * multi_background_factor\n\n 
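# --- Editor's note: illustrative sketch, not part of the original module. ---
# The threshold built just above (and mirrored from the science image in the next
# statement) is an edge-median background estimate: the medians of the four border
# rows/columns, averaged and scaled by ``multi_background_factor``. A minimal
# standalone version, using the hypothetical helper name ``estimate_edge_background``:
def estimate_edge_background(image, multi_background_factor=3):
    """Estimate a background threshold from the four edge rows/columns of a 2D image."""
    import numpy as np  # local import keeps the sketch self-contained
    edge_medians = [np.median(image[0]), np.median(image[-1]),
                    np.median(image[:, 0]), np.median(image[:, -1])]
    # average the edge medians and scale, as done for the flux-mask threshold here
    return float(np.mean(edge_medians)) * multi_background_factor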
mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]), np.median(\n sci_image[-1]), np.median(sci_image[:, 0]),\n np.median(sci_image[:, -1])]) * multi_background_factor\n\n mean_value_of_background = np.max(\n [mean_value_of_background_via_var, mean_value_of_background_via_sci])\n if self.verbosity > 1:\n logging.info(\n str(multi_background_factor) + 'x mean_value_of_background in image with index'\n + str(i) + ' is estimated to be: ' + str(mean_value_of_background))\n\n list_of_mean_value_of_background.append(mean_value_of_background)\n\n list_of_flux_mask = []\n for i in range(len(list_of_sci_images)):\n sci_image = list_of_sci_images[i]\n var_image = list_of_var_images[i]\n flux_mask = sci_image > (list_of_mean_value_of_background[i])\n # normalized science image\n\n sci_image_std = sci_image / np.sqrt(var_image)\n list_of_sci_image_std.append(sci_image_std)\n list_of_flux_mask.append(flux_mask)\n\n # find postions for focus image in the raveled images\n if len(list_of_flux_mask) > 1:\n len_of_flux_masks = np.array(list(map(np.sum, list_of_flux_mask)))\n position_of_most_focus_image = np.where(len_of_flux_masks == np.min(len_of_flux_masks))[0][0]\n position_focus_1 = np.sum(len_of_flux_masks[:position_of_most_focus_image])\n position_focus_2 = np.sum(len_of_flux_masks[:position_of_most_focus_image + 1])\n\n self.list_of_flux_mask = list_of_flux_mask\n self.list_of_sci_image_std = list_of_sci_image_std\n ##########################################################################\n # masked science image\n list_of_I = []\n list_of_I_std = []\n list_of_std_image = []\n for i in range(len(list_of_sci_images)):\n\n sci_image = list_of_sci_images[i]\n sci_image_std = list_of_sci_image_std[i]\n flux_mask = list_of_flux_mask[i]\n std_image = np.sqrt(list_of_var_images[i][flux_mask]).ravel()\n\n # using variable name `I` to match the original source paper\n I = sci_image[flux_mask].ravel() # noqa: E741\n # I=((sci_image[flux_mask])/np.sum(sci_image[flux_mask])).ravel()\n I_std = ((sci_image_std[flux_mask]) / 1).ravel()\n # I_std=((sci_image_std[flux_mask])/np.sum(sci_image_std[flux_mask])).ravel()\n\n list_of_I.append(I)\n list_of_std_image.append(std_image)\n list_of_I_std.append(I_std)\n\n # addition May22\n # array_of_sci_image_std = np.array(list_of_sci_image_std)\n list_of_std_sum = []\n for i in range(len(list_of_sci_image_std)):\n list_of_std_sum.append(np.sum(list_of_std_image[i]))\n\n array_of_std_sum = np.array(list_of_std_sum)\n array_of_std_sum = array_of_std_sum / np.min(array_of_std_sum)\n\n list_of_std_image_renormalized = []\n for i in range(len(list_of_std_image)):\n list_of_std_image_renormalized.append(list_of_std_image[i] * array_of_std_sum[i])\n #\n uber_std = [item for sublist in list_of_std_image_renormalized for item in sublist]\n\n # join all I,I_std from all individual images into one uber I,I_std\n uber_I = [item for sublist in list_of_I for item in sublist]\n # uber_std=[item for sublist in list_of_std_image for item in sublist]\n # uber_I_std=[item for sublist in list_of_I_std for item in sublist]\n\n uber_I = np.array(uber_I)\n uber_std = np.array(uber_std)\n\n uber_I_std = uber_I / uber_std\n\n if self.save:\n np.save('/tigress/ncaplar/Results/list_of_sci_images_' + str(num_iter),\n list_of_sci_images)\n np.save('/tigress/ncaplar/Results/list_of_mean_value_of_background_' + str(num_iter),\n list_of_mean_value_of_background)\n np.save('/tigress/ncaplar/Results/list_of_flux_mask_' + str(num_iter),\n list_of_flux_mask)\n 
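# --- Editor's note: illustrative sketch, not part of the original module. ---
# The ``uber_*`` arrays saved in this block are the per-image pixels above the flux
# threshold, flattened and concatenated into single long vectors; dividing by the
# per-pixel standard deviation gives the noise-normalized data vector that the
# Tokovinin step later compares against the model. A minimal standalone version
# (``build_uber_vectors`` is a hypothetical name; the per-image renormalization by
# ``array_of_std_sum`` used above is omitted here for brevity):
def build_uber_vectors(list_of_sci_images, list_of_var_images, list_of_flux_mask):
    import numpy as np  # local import keeps the sketch self-contained
    masked_sci = [sci[mask].ravel()
                  for sci, mask in zip(list_of_sci_images, list_of_flux_mask)]
    masked_std = [np.sqrt(var[mask]).ravel()
                  for var, mask in zip(list_of_var_images, list_of_flux_mask)]
    uber_I = np.concatenate(masked_sci)    # all unmasked science pixels, one vector
    uber_std = np.concatenate(masked_std)  # matching per-pixel standard deviations
    uber_I_std = uber_I / uber_std         # noise-normalized data vector
    return uber_I, uber_std, uber_I_std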
np.save('/tigress/ncaplar/Results/uber_std_' + str(num_iter),\n uber_std)\n np.save('/tigress/ncaplar/Results/uber_I_' + str(num_iter),\n uber_I)\n\n # March 14, 2022, adding just pure avoid of the run\n if up_to_which_z is False:\n # 0. likelihood averaged over all images (before the function)\n # 1. likelihood averaged over all images (before the function)\n # 2. likelihood per image (output from model_multi) (before the function)\n # 3. likelihood per image (output from model_multi) (before the function)\n # 4. out_images\n # 5. list of initial model images\n # 6. list of initial model images\n # 7. parametrization before the function\n # 8. parametrization after the function\n # 9. list of parameters per image (before the function)\n # 10. list of parameters per image (after the function)\n # 11. list of chi2 per image (before the function)\n # 12. list of chi2 per image (after the function)\n # 13. list of psf position of image (before the function)\n # 14. list of psf position of image (after the function)\n\n initial_model_result, list_of_initial_model_result, list_of_image_0,\\\n list_of_initial_input_parameters, list_of_pre_chi2, list_of_psf_positions =\\\n pre_model_result, model_results, pre_images, pre_input_parameters,\\\n chi_2_before_iteration_array, list_of_psf_positions\n\n if previous_best_result is None:\n return initial_model_result, initial_model_result,\\\n list_of_initial_model_result, list_of_initial_model_result,\\\n None, pre_images, pre_images,\\\n allparameters_parametrization_proposal,\\\n allparameters_parametrization_proposal,\\\n list_of_initial_input_parameters, list_of_initial_input_parameters,\\\n list_of_pre_chi2, list_of_pre_chi2,\\\n list_of_psf_positions, list_of_psf_positions,\\\n [None, None, None, None, None]\n else:\n return initial_model_result, initial_model_result,\\\n list_of_initial_model_result, list_of_initial_model_result,\\\n None, pre_images, pre_images,\\\n allparameters_parametrization_proposal,\\\n allparameters_parametrization_proposal,\\\n list_of_initial_input_parameters, list_of_initial_input_parameters,\\\n list_of_pre_chi2, list_of_pre_chi2,\\\n list_of_psf_positions, list_of_psf_positions\n\n # set number of extra Zernike\n # number_of_extra_zernike=0\n # twentytwo_or_extra=22\n # numbers that make sense are 11,22,37,56,79,106,137,172,211,254\n\n # if number_of_extra_zernike is None:\n # number_of_extra_zernike=0\n # else:\n number_of_extra_zernike = self.zmax - 22\n\n ##########################################################################\n # Start of the iterative process\n\n number_of_non_decreses = [0]\n\n for iteration_number in range(1):\n\n if iteration_number == 0:\n\n # initial SVD treshold\n thresh0 = 0.02\n else:\n pass\n\n ##########################################################################\n # starting real iterative process here\n # create changes in parametrizations\n\n # list of how much to move Zernike coefficents\n # list_of_delta_z=[]\n # for z_par in range(3,22+number_of_extra_zernike):\n # list_of_delta_z.append(0.5/((np.sqrt(8.*(z_par+1.)-6.)-1.)/2.))\n\n # list of how much to move Zernike coefficents\n # possibly needs to me modified to be smarther and take into account that\n # every second parameter gets ``amplified'' in defocus\n # list_of_delta_z_parametrizations=[]\n # for z_par in range(0,19*2+2*number_of_extra_zernike):\n # list_of_delta_z_parametrizations.append(0.5/((np.sqrt(8.*(z_par+1.)-6.)-1.)/2.))\n\n # this should produce reasonable changes in multi analysis\n 
list_of_delta_z_parametrizations = []\n for z_par in range(0, 19 * 2 + 2 * number_of_extra_zernike):\n z_par_i = z_par + 4\n # if this is the parameter that change\n if np.mod(z_par_i, 2) == 0:\n list_of_delta_z_parametrizations.append(0.1 * 0.05 / np.sqrt(z_par_i))\n if np.mod(z_par_i, 2) == 1:\n list_of_delta_z_parametrizations.append(0.05 / np.sqrt(z_par_i))\n\n array_of_delta_z_parametrizations = np.array(list_of_delta_z_parametrizations) * (1)\n\n if iteration_number == 0:\n pass\n else:\n # array_of_delta_z_parametrizations=first_proposal_Tokovnin/4\n array_of_delta_z_parametrizations = np.maximum(\n array_of_delta_z_parametrizations, first_proposal_Tokovnin / 4) # noqa\n\n # this code might work with global parameters?\n array_of_delta_global_parametrizations = np.array([0.1, 0.02, 0.1, 0.1, 0.1, 0.1,\n 0.3, 1, 0.1, 0.1,\n 0.15, 0.15, 0.1,\n 0.07, 0.05, 0.05, 0.4,\n 30000, 0.5, 0.001,\n 0.05, 0.05, 0.01])\n # array_of_delta_global_parametrizations=array_of_delta_global_parametrizations/1\n array_of_delta_global_parametrizations = array_of_delta_global_parametrizations / 10\n\n if move_allparameters:\n array_of_delta_all_parametrizations = np.concatenate(\n (array_of_delta_z_parametrizations[0:19 * 2], array_of_delta_global_parametrizations,\n array_of_delta_z_parametrizations[19 * 2:]))\n\n if self.save:\n np.save('/tigress/ncaplar/Results/array_of_delta_z_parametrizations_'\n + str(num_iter) + '_' + str(iteration_number), array_of_delta_z_parametrizations)\n np.save('/tigress/ncaplar/Results/array_of_delta_global_parametrizations_'\n + str(num_iter) + '_' + str(iteration_number), array_of_delta_global_parametrizations)\n if move_allparameters:\n np.save('/tigress/ncaplar/Results/array_of_delta_all_parametrizations_'\n + str(num_iter) + '_' + str(iteration_number),\n array_of_delta_all_parametrizations)\n\n # initialize\n # if this is the first iteration of the iterative algorithm\n if iteration_number == 0:\n\n thresh = thresh0\n all_global_parametrization_old = allparameters_parametrization_proposal[19 * 2:19 * 2 + 23]\n if number_of_extra_zernike == 0:\n all_wavefront_z_parametrization_old = allparameters_parametrization_proposal[0:19 * 2]\n else:\n # if you want more Zernike\n if len(allparameters_parametrization_proposal) == 19 * 2 + 23:\n # if you did not pass explicit extra Zernike, start with zeroes\n all_wavefront_z_parametrization_old = np.concatenate(\n (allparameters_parametrization_proposal[0:19 * 2],\n np.zeros(2 * number_of_extra_zernike)))\n else:\n all_wavefront_z_parametrization_old = np.concatenate(\n (allparameters_parametrization_proposal[0:19 * 2],\n allparameters_parametrization_proposal[19 * 2 + 23:]))\n\n pass\n # if this is not a first iteration\n else:\n # errors in the typechecker for 10 lines below are fine\n if self.verbosity == 1:\n logging.info('array_of_delta_z in ' + str(iteration_number) + ' '\n + str(array_of_delta_z_parametrizations))\n # code analysis programs might suggest that there is an error here, but everything is ok\n # chi_2_before_iteration=np.copy(chi_2_after_iteration)\n # copy wavefront from the end of the previous iteration\n\n all_wavefront_z_parametrization_old = np.copy(all_wavefront_z_parametrization_new) # noqa\n if move_allparameters:\n all_global_parametrization_old = np.copy(all_global_parametrization_new) # noqa\n if self.verbosity >= 1:\n if did_chi_2_improve == 1: # noqa\n logging.info('did_chi_2_improve: yes')\n else:\n logging.info('did_chi_2_improve: no')\n if did_chi_2_improve == 0: # noqa\n thresh = thresh0\n else:\n 
thresh = thresh * 0.5\n\n ##########################################################################\n # create a model with input parameters from previous iteration\n\n list_of_all_wavefront_z_parameterization = []\n\n up_to_z22_parametrization_start = all_wavefront_z_parametrization_old[0:19 * 2]\n from_z22_parametrization_start = all_wavefront_z_parametrization_old[19 * 2:]\n global_parametrization_start = all_global_parametrization_old\n\n if self.verbosity >= 1:\n logging.info('up_to_z22_parametrization_start: ' + str(up_to_z22_parametrization_start))\n logging.info('nonwavefront_par: ' + str(nonwavefront_par))\n logging.info('from_z22_parametrization_start' + str(from_z22_parametrization_start))\n\n # logging.info('iteration '+str(iteration_number)+' shape of up_to_z22_parametrization_start is:\n # '+str(up_to_z22_parametrization_start.shape))\n if move_allparameters:\n initial_input_parameterization = np.concatenate(\n (up_to_z22_parametrization_start, global_parametrization_start,\n from_z22_parametrization_start))\n else:\n initial_input_parameterization = np.concatenate(\n (up_to_z22_parametrization_start, nonwavefront_par, from_z22_parametrization_start))\n\n if self.verbosity >= 1:\n logging.info(\n 'initial input parameters in iteration ' + str(iteration_number) + ' are: '\n + str(initial_input_parameterization))\n logging.info(\n 'moving input wavefront parameters in iteration ' + str(iteration_number) + ' by: '\n + str(array_of_delta_z_parametrizations))\n if move_allparameters:\n logging.info(\n 'moving global input parameters in iteration ' + str(iteration_number) + ' by: '\n + str(array_of_delta_global_parametrizations))\n\n if self.save:\n np.save('/tigress/ncaplar/Results/initial_input_parameterization_'\n + str(num_iter) + '_' + str(iteration_number), initial_input_parameterization)\n\n # logging.info('len initial_input_parameterization '+str(len(initial_input_parameterization)))\n\n list_of_minchain = model_multi.create_list_of_allparameters(\n initial_input_parameterization, list_of_defocuses=list_of_defocuses_input_long,\n zmax=self.zmax)\n # list_of_minchain=model_multi.create_list_of_allparameters(allparameters_parametrization_proposal,list_of_defocuses=list_of_defocuses_input_long,zmax=56)\n\n # moved in under `else` statment\n # res_multi=model_multi(list_of_minchain,return_Images=True,use_only_chi=use_only_chi,\\\n # multi_background_factor=multi_background_factor)\n\n # if this is the first iteration take over the results from premodel run\n if iteration_number == 0:\n initial_model_result, list_of_initial_model_result, list_of_image_0,\\\n list_of_initial_input_parameters, list_of_pre_chi2, list_of_psf_positions =\\\n pre_model_result, model_results, pre_images, pre_input_parameters,\\\n chi_2_before_iteration_array, list_of_psf_positions\n else:\n res_multi = model_multi(list_of_minchain, return_Images=True, use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n # mean_res_of_multi_same_spot_proposal,list_of_single_res_proposal,list_of_single_model_image_proposal,\\\n # list_of_single_allparameters_proposal,list_of_single_chi_results_proposal=res_multi\n initial_model_result, list_of_initial_model_result, list_of_image_0,\\\n list_of_initial_input_parameters, list_of_pre_chi2, list_of_psf_positions = res_multi\n # modify variance image according to the models that have just been created\n # second time modifying variance image\n list_of_single_model_image = list_of_image_0\n list_of_var_images_via_model = []\n for 
index_of_single_image in range(len(list_of_sci_images)):\n popt = create_popt_for_custom_var(self.list_of_sci_images[index_of_single_image],\n self.list_of_var_images[index_of_single_image],\n self.list_of_mask_images[index_of_single_image])\n single_var_image_via_model =\\\n create_custom_var_from_popt(list_of_single_model_image[index_of_single_image], popt)\n\n list_of_var_images_via_model.append(single_var_image_via_model)\n # replace the variance images provided with these custom variance images\n list_of_var_images = list_of_var_images_via_model\n # self.list_of_var_images = list_of_var_images\n\n # initial_model_result,image_0,initial_input_parameters,pre_chi2=model(initial_input_parameters,return_Image=True,return_intermediate_images=False)\n if self.save:\n np.save('/tigress/ncaplar/Results/list_of_initial_model_result_'\n + str(num_iter) + '_' + str(iteration_number), list_of_initial_model_result)\n np.save('/tigress/ncaplar/Results/list_of_image_0_' + str(num_iter) + '_'\n + str(iteration_number), list_of_image_0)\n np.save('/tigress/ncaplar/Results/list_of_initial_input_parameters_'\n + str(num_iter) + '_' + str(iteration_number), list_of_initial_input_parameters)\n np.save('/tigress/ncaplar/Results/list_of_pre_chi2_' + str(num_iter) + '_'\n + str(iteration_number), list_of_pre_chi2)\n np.save('/tigress/ncaplar/Results/list_of_psf_positions_' + str(num_iter) + '_'\n + str(iteration_number), list_of_psf_positions)\n\n ##########################################################################\n # divided model images by their standard deviations\n\n list_of_image_0_std = []\n for i in range(len(list_of_image_0)):\n # normalizing by standard deviation image\n # May 22 modification\n STD = np.sqrt(list_of_var_images[i]) * array_of_std_sum[i]\n image_0 = list_of_image_0[i]\n list_of_image_0_std.append(image_0 / STD)\n\n ##########################################################################\n # updated science images divided by std (given that we created new custom\n # variance images, via model)\n\n ##########################################################################\n # mask model images at the start of this iteration, before modifying parameters\n # create uber_M0\n\n list_of_M0 = []\n list_of_M0_std = []\n for i in range(len(list_of_image_0_std)):\n\n image_0 = list_of_image_0[i]\n image_0_std = list_of_image_0_std[i]\n flux_mask = list_of_flux_mask[i]\n # what is list_of_mask_images?\n\n M0 = image_0[flux_mask].ravel()\n # M0=((image_0[flux_mask])/np.sum(image_0[flux_mask])).ravel()\n M0_std = ((image_0_std[flux_mask]) / 1).ravel()\n # M0_std=((image_0_std[flux_mask])/np.sum(image_0_std[flux_mask])).ravel()\n\n list_of_M0.append(M0)\n list_of_M0_std.append(M0_std)\n\n # join all M0,M0_std from invidiual images into one uber M0,M0_std\n uber_M0 = [item for sublist in list_of_M0 for item in sublist]\n uber_M0_std = [item for sublist in list_of_M0_std for item in sublist]\n\n uber_M0 = np.array(uber_M0)\n uber_M0_std = np.array(uber_M0_std)\n\n # uber_M0=uber_M0/np.sum(uber_M0)\n # uber_M0_std=uber_M0_std/np.sum(uber_M0_std)\n\n self.uber_M0 = uber_M0\n self.uber_M0_std = uber_M0_std\n\n if self.save:\n np.save('/tigress/ncaplar/Results/uber_M0_' + str(num_iter) + '_' + str(iteration_number),\n uber_M0)\n np.save('/tigress/ncaplar/Results/uber_M0_std_' + str(num_iter) + '_' + str(iteration_number),\n uber_M0_std)\n\n ##########################################################################\n # difference between model (uber_M0) and science (uber_I) at start of this 
iteration\n\n # non-std version\n # not used, that is ok, we are at the moment using std version\n IM_start = np.sum(np.abs(np.array(uber_I) - np.array(uber_M0)))\n # std version\n IM_start_std = np.sum(np.abs(np.array(uber_I_std) - np.array(uber_M0_std)))\n\n if len(list_of_flux_mask) > 1:\n IM_start_focus = np.sum(\n np.abs(np.array(uber_I) - np.array(uber_M0))[position_focus_1:position_focus_2])\n IM_start_std_focus = np.sum(\n np.abs(np.array(uber_I_std) - np.array(uber_M0_std))[position_focus_1:position_focus_2])\n\n # mean of differences of our images - should we use mean?; probably not... needs to be normalized?\n unitary_IM_start = np.mean(IM_start)\n unitary_IM_start_std = np.mean(IM_start_std)\n\n # logging.info list_of_IM_start_std\n if self.verbosity == 1:\n logging.info('np.sum(np.abs(I-M0)) before iteration ' + str(num_iter)\n + '_' + str(iteration_number) + ': ' + str(unitary_IM_start))\n logging.info('np.sum(np.abs(I_std-M0_std)) before iteration ' + str(num_iter)\n + '_' + str(iteration_number) + ': ' + str(unitary_IM_start_std))\n # logging.info('np.sum(np.abs(I_std-M0_std)) before iteration '+str(iteration_number)+':\n # '+str(unitary_IM_start_std))\n\n ##########################################################################\n # create list of new parametrizations to be tested\n # combine the old wavefront parametrization with the delta_z_parametrization\n\n # create two lists:\n # 1. one contains only wavefront parametrizations\n # 2. second contains the whole parametrizations\n # logging.info('checkpoint 0')\n if move_allparameters:\n list_of_all_wavefront_z_parameterization = []\n list_of_input_parameterizations = []\n for z_par in range(19 * 2):\n all_wavefront_z_parametrization_list = np.copy(all_wavefront_z_parametrization_old)\n all_wavefront_z_parametrization_list[z_par] =\\\n all_wavefront_z_parametrization_list[z_par] + \\\n array_of_delta_z_parametrizations[z_par]\n list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)\n\n up_to_z22_start = all_wavefront_z_parametrization_list[0:19 * 2]\n from_z22_start = all_wavefront_z_parametrization_list[19 * 2:]\n\n parametrization_proposal = np.concatenate(\n (up_to_z22_start, nonwavefront_par, from_z22_start))\n # actually it is parametrization\n list_of_input_parameterizations.append(parametrization_proposal)\n # logging.info('checkpoint 1')\n for g_par in range(23):\n all_global_parametrization_list = np.copy(all_global_parametrization_old)\n all_global_parametrization_list[g_par] = all_global_parametrization_list[g_par] + \\\n array_of_delta_global_parametrizations[g_par]\n # list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)\n\n up_to_z22_start = all_wavefront_z_parametrization_old[0:19 * 2]\n from_z22_start = all_wavefront_z_parametrization_old[19 * 2:]\n\n parametrization_proposal = np.concatenate(\n (up_to_z22_start, all_global_parametrization_list, from_z22_start))\n # actually it is parametrization\n list_of_input_parameterizations.append(parametrization_proposal)\n # logging.info('checkpoint 2')\n for z_par in range(19 * 2, len(all_wavefront_z_parametrization_old)):\n all_wavefront_z_parametrization_list = np.copy(all_wavefront_z_parametrization_old)\n all_wavefront_z_parametrization_list[z_par] =\\\n all_wavefront_z_parametrization_list[z_par] + \\\n array_of_delta_z_parametrizations[z_par]\n list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)\n\n up_to_z22_start = all_wavefront_z_parametrization_list[0:19 * 
2]\n from_z22_start = all_wavefront_z_parametrization_list[19 * 2:]\n\n parametrization_proposal = np.concatenate(\n (up_to_z22_start, nonwavefront_par, from_z22_start))\n # actually it is parametrization\n list_of_input_parameterizations.append(parametrization_proposal)\n # logging.info('checkpoint 3')\n\n else:\n list_of_all_wavefront_z_parameterization = []\n list_of_input_parameterizations = []\n for z_par in range(len(all_wavefront_z_parametrization_old)):\n all_wavefront_z_parametrization_list = np.copy(all_wavefront_z_parametrization_old)\n all_wavefront_z_parametrization_list[z_par] =\\\n all_wavefront_z_parametrization_list[z_par] + \\\n array_of_delta_z_parametrizations[z_par]\n list_of_all_wavefront_z_parameterization.append(all_wavefront_z_parametrization_list)\n\n up_to_z22_start = all_wavefront_z_parametrization_list[0:19 * 2]\n from_z22_start = all_wavefront_z_parametrization_list[19 * 2:]\n\n parametrization_proposal = np.concatenate(\n (up_to_z22_start, nonwavefront_par, from_z22_start))\n # actually it is parametrization\n list_of_input_parameterizations.append(parametrization_proposal)\n # logging.info('checkpoint 4')\n\n ##########################################################################\n # Starting testing new set of parameters\n # Creating new images\n\n out_ln = []\n out_ln_ind = []\n out_images = []\n out_parameters = []\n out_chi2 = []\n out_pfs_positions = []\n\n if self.verbosity >= 1:\n logging.info(\n 'We are now inside of the pool loop number ' + str(iteration_number)\n + ' with num_iter: ' + str(num_iter))\n\n # actually it is parametrization\n # list of (56-3)*2 sublists, each one with (56-3)*2 + 23 values\n time_start = time.time()\n\n # This assume that Zernike parameters go up to 56\n # I need to pass each of 106 parametrization to model_multi BUT\n # model_multi actually takes list of parameters, not parametrizations\n # I need list that has 106 sublists, each one of those being 9x(53+23)\n # 9 == number of images\n # 53 == number of Zernike parameters (56-3)\n # 23 == number of global parameters\n uber_list_of_input_parameters = []\n for i in range(len(list_of_input_parameterizations)):\n\n list_of_input_parameters = model_multi.create_list_of_allparameters(\n list_of_input_parameterizations[i],\n list_of_defocuses=list_of_defocuses_input_long, zmax=self.zmax)\n uber_list_of_input_parameters.append(list_of_input_parameters)\n\n # save the uber_list_of_input_parameters\n if self.save:\n np.save('/tigress/ncaplar/Results/uber_list_of_input_parameters_'\n + str(num_iter) + '_' + str(iteration_number), uber_list_of_input_parameters)\n\n # pass new model_multi that has fixed pos (October 6, 2020)\n # should have same paramter as staring model_multi, apart from\n # list_of_psf_positions (maybe variance?, but prob not)\n model_multi_out = LN_PFS_multi_same_spot(\n list_of_sci_images,\n list_of_var_images,\n list_of_mask_images=list_of_mask_images,\n wavelength=self.wavelength,\n dithering=self.dithering,\n save=self.save,\n zmax=self.zmax,\n verbosity=self.verbosity_model,\n double_sources=self.double_sources,\n double_sources_positions_ratios=double_sources_positions_ratios,\n npix=self.npix,\n fit_for_flux=self.fit_for_flux,\n test_run=self.test_run,\n list_of_psf_positions=list_of_psf_positions)\n\n if move_allparameters:\n self.array_of_delta_all_parametrizations = array_of_delta_all_parametrizations\n else:\n self.array_of_delta_z_parametrizations = array_of_delta_z_parametrizations\n\n # start of creating H\n\n # H is normalized difference 
between pixels of the model image\n # that result from changing the j-th Zernike term compared to the original image\n # This is expensive because we have to generate new image for each Zernike term\n if previous_best_result is None:\n if self.verbosity >= 1:\n logging.info('self.pool parameter is: ' + str(self.pool))\n\n # generate images\n if self.pool is None:\n out1 = map(\n partial(\n model_multi_out,\n return_Images=True,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor),\n uber_list_of_input_parameters)\n else:\n out1 = self.pool.map(\n partial(\n model_multi_out,\n return_Images=True,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor),\n uber_list_of_input_parameters)\n out1 = list(out1)\n time_end = time.time()\n if self.verbosity >= 1:\n logging.info('time_end-time_start for creating model_multi_out '\n + str(time_end - time_start))\n\n # normalization of the preinput run? (what did I mean by that)\n pre_input_parameters = np.array(pre_input_parameters)\n if self.verbosity >= 1:\n logging.info('pre_input_parameters.shape ' + str(pre_input_parameters.shape))\n logging.info('pre_input_parameters[0][0:5] ' + str(pre_input_parameters[0][0:5]))\n\n # select the column specifying the flux normalization from the input images\n array_of_normalizations_pre_input = pre_input_parameters[:, 41]\n\n # out1=a_pool.map(model,input_parameters,repeat(True))\n for i in range(len(uber_list_of_input_parameters)):\n # logging.info(i)\n\n # initial_model_result,list_of_initial_model_result,list_of_image_0,\\\n # list_of_initial_input_parameters,list_of_pre_chi2\n\n # outputs are\n # 0. mean likelihood\n # 1. list of individual res (likelihood)\n # 2. list of science images\n # 3. list of parameters used\n # 4. 
list of quality measurments\n\n out_images_pre_renormalization = np.array(out1[i][2])\n out_parameters_single_move = np.array(out1[i][3])\n # replace the normalizations in the output imags with the normalizations\n # from the input images\n array_of_normalizations_out = out_parameters_single_move[:, 41]\n out_renormalization_parameters = array_of_normalizations_pre_input /\\\n array_of_normalizations_out\n\n out_ln.append(out1[i][0])\n out_ln_ind.append(out1[i][1])\n # logging.info('out_images_pre_renormalization.shape: '+\n # str(out_images_pre_renormalization.shape))\n # logging.info('out_renormalization_parameters.shape: '+\n # str(out_renormalization_parameters.shape))\n # np.save('/tigress/ncaplar/Results/out_images_pre_renormalization',\n # out_images_pre_renormalization)\n\n out_images_step = []\n for lv in range(len(out_renormalization_parameters)):\n out_images_step.append(\n out_images_pre_renormalization[lv]\n * out_renormalization_parameters[lv])\n out_images.append(out_images_step)\n\n out_parameters.append(out1[i][3])\n out_chi2.append(out1[i][4])\n out_pfs_positions.append(out1[i][5])\n\n # We use these out_images to study the differences due to changing parameters;\n # We do not want the normalization to affect things (and position of optical center)\n # so we renormalized to that multiplication constants are the same as in the input\n\n time_end = time.time()\n if self.verbosity >= 1:\n logging.info('time_end-time_start for whole model_multi_out '\n + str(time_end - time_start))\n\n if self.save:\n np.save(\n '/tigress/ncaplar/Results/out_images_' + str(num_iter) + '_'\n + str(iteration_number), out_images)\n np.save(\n '/tigress/ncaplar/Results/out_parameters_' + str(num_iter) + '_'\n + str(iteration_number), out_parameters)\n np.save(\n '/tigress/ncaplar/Results/out_chi2_' + str(num_iter) + '_'\n + str(iteration_number), out_chi2)\n\n ##########################################################################\n # Normalize created images\n\n # We created ((zmax-3)*2) x N images, where N is the number of defocused images\n\n # join all images together\n list_of_images_normalized_uber = []\n # list_of_images_normalized_std_uber = []\n # go over (zmax-3)*2 images\n for j in range(len(out_images)):\n # two steps for what could have been achived in one, but to ease up\n # transition from previous code\n out_images_single_parameter_change = out_images[j]\n optpsf_list = out_images_single_parameter_change\n\n # flux image has to correct per image\n # mask images that have been created in the fitting procedure with the\n # appropriate flux mask\n images_normalized = []\n for i in range(len(optpsf_list)):\n flux_mask = list_of_flux_mask[i]\n images_normalized.append((optpsf_list[i][flux_mask]).ravel())\n\n images_normalized_flat = [item for sublist in images_normalized for item in sublist]\n images_normalized_flat = np.array(images_normalized_flat)\n\n # list of (zmax-3)*2 raveled images\n list_of_images_normalized_uber.append(images_normalized_flat)\n\n # same but divided by STD\n # images_normalized_std=[]\n # for i in range(len(optpsf_list)):\n # seems that I am a bit more verbose here with my definitions\n # optpsf_list_i=optpsf_list[i]\n\n # do I want to generate new STD images, from each image?\n # May 22 modification\n # STD=list_of_sci_image_std[i]*array_of_std_sum[i]\n # optpsf_list_i_STD=optpsf_list_i/STD\n # flux_mask=list_of_flux_mask[i]\n # images_normalized_std.append((optpsf_list_i_STD[flux_mask]/np.sum(optpsf_list_i_STD[flux_mask])).ravel())\n\n # join all images 
together\n # images_normalized_std_flat=\n # [item for sublist in images_normalized_std for item in sublist]\n # normalize so that the sum is still one\n # images_normalized_std_flat=np.array(images_normalized_std_flat)/len(optpsf_list)\n\n # list_of_images_normalized_std_uber.append(images_normalized_std_flat)\n\n # create uber images_normalized,images_normalized_std\n # images that have zmax*2 rows and very large number of columns (number of\n # non-masked pixels from all N images)\n uber_images_normalized = np.array(list_of_images_normalized_uber)\n # uber_images_normalized_std=np.array(list_of_images_normalized_std_uber)\n\n if self.save:\n np.save(\n '/tigress/ncaplar/Results/uber_images_normalized_' + str(num_iter) + '_'\n + str(iteration_number), uber_images_normalized)\n\n # np.save('/tigress/ncaplar/Results/uber_images_normalized_std_'+str(num_iter)+'_'+str(iteration_number),\\\n # uber_images_normalized_std)\n\n # single_wavefront_parameter_list=[]\n # for i in range(len(out_parameters)):\n # single_wavefront_parameter_list.\n # append(np.concatenate((out_parameters[i][:19],out_parameters[i][42:])) )\n\n ##########################################################################\n # Core Tokovinin algorithm\n\n if self.verbosity >= 1:\n logging.info('images_normalized (uber).shape: ' + str(uber_images_normalized.shape))\n logging.info('array_of_delta_z_parametrizations[:,None].shape'\n + str(array_of_delta_z_parametrizations[:, None].shape))\n # equation A1 from Tokovinin 2006\n # new model minus old model\n if move_allparameters:\n H = np.transpose(np.array((uber_images_normalized - uber_M0))\n / array_of_delta_all_parametrizations[:, None])\n # H_std=np.transpose(np.array((uber_images_normalized_std-uber_M0_std))/\\\n # array_of_delta_z_parametrizations[:,None])\n H_std = np.transpose(np.array((uber_images_normalized - uber_M0))\n / array_of_delta_all_parametrizations[:, None]) /\\\n uber_std.ravel()[:, None]\n else:\n H = np.transpose(np.array((uber_images_normalized - uber_M0))\n / array_of_delta_z_parametrizations[:, None])\n # H_std=np.transpose(np.array((uber_images_normalized_std-uber_M0_std))/array_of_delta_z_parametrizations[:,None])\n H_std = np.transpose(np.array((uber_images_normalized - uber_M0))\n / array_of_delta_z_parametrizations[:, None]) /\\\n uber_std.ravel()[:, None]\n\n array_of_delta_z_parametrizations_None = np.copy(array_of_delta_z_parametrizations[:, None])\n else:\n H = self.create_simplified_H(previous_best_result)\n H_std = H / uber_std.ravel()[:, None]\n\n # end of creating H\n\n if self.save and previous_best_result is None:\n np.save('/tigress/ncaplar/Results/array_of_delta_z_parametrizations_None_'\n + str(num_iter) + '_' + str(iteration_number),\n array_of_delta_z_parametrizations_None)\n\n if self.save:\n np.save('/tigress/ncaplar/Results/H_' + str(num_iter) + '_' + str(iteration_number), H)\n if self.save:\n np.save('/tigress/ncaplar/Results/H_std_' + str(num_iter) + '_' + str(iteration_number),\n H_std)\n\n first_proposal_Tokovnin, first_proposal_Tokovnin_std = self.create_first_proposal_Tokovnin(\n H, H_std, uber_I, uber_M0, uber_std, up_to_which_z=up_to_which_z)\n\n \"\"\"\n #logging.info('np.mean(H,axis=0).shape)'+str(np.mean(H,axis=0).shape))\n singlular_parameters=np.arange(H.shape[1])[np.abs((np.mean(H,axis=0)))<0.01]\n non_singlular_parameters=np.arange(H.shape[1])[np.abs((np.mean(H,axis=0)))>0.01]\n #logging.info('non_singlular_parameters.shape)'+str(non_singlular_parameters.shape))\n H=H[:,non_singlular_parameters]\n 
H_std=H_std[:,non_singlular_parameters]\n\n HHt=np.matmul(np.transpose(H),H)\n HHt_std=np.matmul(np.transpose(H_std),H_std)\n #logging.info('svd thresh is '+str(thresh))\n #invHHt=svd_invert(HHt,thresh)\n #invHHt_std=svd_invert(HHt_std,thresh)\n invHHt=np.linalg.inv(HHt)\n invHHt_std=np.linalg.inv(HHt_std)\n\n invHHtHt=np.matmul(invHHt,np.transpose(H))\n invHHtHt_std=np.matmul(invHHt_std,np.transpose(H_std))\n\n\n # I is uber_I now (science images)\n # M0 is uber_M0 now (set of models before the iteration)\n first_proposal_Tokovnin=np.matmul(invHHtHt,uber_I-uber_M0)\n #first_proposal_Tokovnin_std=np.matmul(invHHtHt_std,uber_I_std-uber_M0_std)\n first_proposal_Tokovnin_std=np.matmul(invHHtHt_std,(uber_I-uber_M0)/uber_std.ravel())\n\n\n\n # if you have removed certain parameters because of the singularity,\n return them here, with no change\n if len(singlular_parameters)>0:\n for i in range(len(singlular_parameters)):\n first_proposal_Tokovnin=np.insert(first_proposal_Tokovnin,singlular_parameters[i],0)\n first_proposal_Tokovnin_std=np.insert(first_proposal_Tokovnin_std,singlular_parameters[i],0)\n #logging.info('first_proposal_Tokovnin_std'+str(first_proposal_Tokovnin_std.shape))\n #logging.info('invHHtHt_std.shape'+str(invHHtHt_std.shape))\n\n \"\"\"\n\n if self.verbosity >= 1:\n logging.info('first_proposal_Tokovnin[:5] is: '\n + str(first_proposal_Tokovnin[:8 * 2]))\n logging.info('first_proposal_Tokovnin_std[:5] is: '\n + str(first_proposal_Tokovnin_std[:8 * 2]))\n try:\n logging.info('ratio is of proposed to initial parameters (std) is: '\n + str(first_proposal_Tokovnin_std / array_of_delta_z_parametrizations))\n except BaseException:\n pass\n\n # Tokovnin_proposal=0.7*first_proposal_Tokovnin\n if move_allparameters:\n Tokovnin_proposal = np.zeros((129,))\n # Tokovnin_proposal[non_singlular_parameters]=0.7*first_proposal_Tokovnin_std\n Tokovnin_proposal[non_singlular_parameters] = 1 * first_proposal_Tokovnin_std # noqa\n\n all_parametrization_new = np.copy(initial_input_parameterization)\n allparameters_parametrization_proposal_after_iteration_before_global_check =\\\n all_parametrization_new + Tokovnin_proposal\n # tests if the global parameters would be out of bounds - if yes, reset\n # them to the limit values\n # noqa: E501 - breaking line limit in order to keep informative names\n global_parametrization_proposal_after_iteration_before_global_check =\\\n allparameters_parametrization_proposal_after_iteration_before_global_check[19 * 2:19 * 2 + 23] # noqa: E501\n checked_global_parameters = check_global_parameters(\n global_parametrization_proposal_after_iteration_before_global_check, test_print=1)\n\n allparameters_parametrization_proposal_after_iteration = np.copy(\n allparameters_parametrization_proposal_after_iteration_before_global_check)\n allparameters_parametrization_proposal_after_iteration[19 * 2:19 * 2 + 23] =\\\n checked_global_parameters\n\n else:\n # Tokovnin_proposal=0.7*first_proposal_Tokovnin_std\n Tokovnin_proposal = 1 * first_proposal_Tokovnin_std\n\n if self.verbosity >= 1:\n logging.info('Tokovnin_proposal[:5] is: ' + str(Tokovnin_proposal[:5]))\n if self.zmax > 35:\n logging.info('Tokovnin_proposal[38:43] is: ' + str(Tokovnin_proposal[38:43]))\n # logging.info('all_wavefront_z_parametrization_old in '+str(iteration_number)+' '+\n # str(all_wavefront_z_parametrization_old[:5]))\n # logging.info('Tokovnin_proposal[:5] is: '+str(Tokovnin_proposal[:5]))\n # logging.info('Tokovnin_proposal.shape '+str(Tokovnin_proposal.shape))\n\n # if the Tokovinin proposal is not 
made, return the initial result\n if len(Tokovnin_proposal) < 10:\n # return initial_model_result,list_of_initial_model_result,list_of_image_0,\\\n # allparameters_parametrization_proposal,list_of_initial_input_parameters,list_of_pre_chi2,list_of_psf_positions\n return initial_model_result, initial_model_result,\\\n list_of_initial_model_result, list_of_initial_model_result,\\\n out_images, list_of_image_0, list_of_image_0,\\\n allparameters_parametrization_proposal, allparameters_parametrization_proposal,\\\n list_of_initial_input_parameters, list_of_initial_input_parameters,\\\n list_of_pre_chi2, list_of_pre_chi2,\\\n list_of_psf_positions, list_of_psf_positions\n\n break\n\n # logging.info('std of Tokovnin_proposal is: '+str(np.std(Tokovnin_proposal)))\n if move_allparameters:\n # all_wavefront_z_parametrization_new=np.copy(all_wavefront_z_parametrization_old)\n # all_global_parametrization_new=np.copy(all_global_parametrization_old)\n # all_parametrization_new=np.copy(initial_input_parameterization)\n\n # allparameters_parametrization_proposal_after_iteration=all_parametrization_new+Tokovnin_proposal\n\n up_to_z22_end = allparameters_parametrization_proposal_after_iteration[:19 * 2]\n from_z22_end = allparameters_parametrization_proposal_after_iteration[19 * 2 + 23:]\n all_wavefront_z_parametrization_new = np.concatenate((up_to_z22_end, from_z22_end))\n\n # all_global_parametrization_new = allparameters_parametrization_proposal_after_iteration[\n # 19 * 2:19 * 2 + 23]\n\n else:\n all_wavefront_z_parametrization_new = np.copy(all_wavefront_z_parametrization_old)\n all_wavefront_z_parametrization_new = all_wavefront_z_parametrization_new + Tokovnin_proposal\n up_to_z22_end = all_wavefront_z_parametrization_new[:19 * 2]\n from_z22_end = all_wavefront_z_parametrization_new[19 * 2:]\n allparameters_parametrization_proposal_after_iteration = np.concatenate(\n (up_to_z22_end, nonwavefront_par, from_z22_end))\n\n if self.save:\n np.save(\n '/tigress/ncaplar/Results/first_proposal_Tokovnin' + str(num_iter) + '_'\n + str(iteration_number), first_proposal_Tokovnin)\n np.save(\n '/tigress/ncaplar/Results/first_proposal_Tokovnin_std' + str(num_iter) + '_'\n + str(iteration_number), first_proposal_Tokovnin_std)\n np.save(\n '/tigress/ncaplar/Results/allparameters_parametrization_proposal_after_iteration_'\n + str(num_iter) + '_' + str(iteration_number),\n allparameters_parametrization_proposal_after_iteration)\n\n #########################\n # Creating single exposure with new proposed parameters and seeing if there is improvment\n time_start_final = time.time()\n\n list_of_parameters_after_iteration = model_multi.create_list_of_allparameters(\n allparameters_parametrization_proposal_after_iteration,\n list_of_defocuses=list_of_defocuses_input_long,\n zmax=self.zmax)\n res_multi = model_multi(\n list_of_parameters_after_iteration,\n return_Images=True,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n\n if self.verbosity >= 1:\n logging.info('allparameters_parametrization_proposal_after_iteration '\n + str(allparameters_parametrization_proposal_after_iteration[0:5]))\n logging.info('list_of_parameters_after_iteration[0][0:5] '\n + str(list_of_parameters_after_iteration[0][0:5]))\n\n final_model_result, list_of_final_model_result, list_of_image_final,\\\n list_of_finalinput_parameters, list_of_after_chi2, list_of_final_psf_positions = res_multi\n # third (last?) 
time modifying variance image\n list_of_single_model_image = list_of_image_final\n list_of_var_images_via_model = []\n for index_of_single_image in range(len(list_of_sci_images)):\n popt = create_popt_for_custom_var(self.list_of_sci_images[index_of_single_image],\n self.list_of_var_images[index_of_single_image],\n self.list_of_mask_images[index_of_single_image])\n single_var_image_via_model =\\\n create_custom_var_from_popt(list_of_single_model_image[index_of_single_image], popt)\n\n list_of_var_images_via_model.append(single_var_image_via_model)\n # replace the variance images provided with these custom variance images\n list_of_var_images = list_of_var_images_via_model\n # self.list_of_var_images = list_of_var_images\n\n time_end_final = time.time()\n if self.verbosity >= 1:\n logging.info('Total time taken for final iteration was ' + str(time_end_final\n - time_start_final)\n + ' seconds with num_iter: ' + str(num_iter))\n\n if self.save:\n np.save('/tigress/ncaplar/Results/list_of_final_model_result_'\n + str(num_iter) + '_' + str(iteration_number), list_of_final_model_result)\n np.save(\n '/tigress/ncaplar/Results/list_of_image_final_' + str(num_iter) + '_'\n + str(iteration_number), list_of_image_final)\n np.save('/tigress/ncaplar/Results/list_of_finalinput_parameters_'\n + str(num_iter) + '_' + str(iteration_number), list_of_finalinput_parameters)\n np.save('/tigress/ncaplar/Results/list_of_after_chi2_' + str(num_iter) + '_'\n + str(iteration_number), list_of_after_chi2)\n np.save('/tigress/ncaplar/Results/list_of_final_psf_positions_'\n + str(num_iter) + '_' + str(iteration_number), list_of_final_psf_positions)\n\n if self.verbosity >= 1:\n logging.info('list_of_final_psf_positions : ' + str(list_of_psf_positions))\n\n ##########################################################################\n # divided model images by their standard deviations\n\n list_of_image_final_std = []\n for i in range(len(list_of_image_0)):\n # normalizing by standard deviation image\n # May 22 modification\n STD = np.sqrt(list_of_var_images[i]) * array_of_std_sum[i]\n image_final = list_of_image_final[i]\n list_of_image_final_std.append(image_final / STD)\n\n ##########################################################################\n # masked model images after this iteration (mask by flux criteria)\n\n list_of_M_final = []\n list_of_M_final_std = []\n for i in range(len(list_of_image_final_std)):\n\n image_final = list_of_image_final[i]\n image_final_std = list_of_image_final_std[i]\n flux_mask = list_of_flux_mask[i]\n # what is list_of_mask_images?\n\n # M_final=((image_final[flux_mask])/np.sum(image_final[flux_mask])).ravel()\n M_final = (image_final[flux_mask]).ravel()\n # M_final_std=((image_final_std[flux_mask])/np.sum(image_final_std[flux_mask])).ravel()\n M_final_std = ((image_final_std[flux_mask]) / 1).ravel()\n\n list_of_M_final.append(M_final)\n list_of_M_final_std.append(M_final_std)\n\n # join all M0,M0_std from invidiual images into one uber M0,M0_std\n uber_M_final = [item for sublist in list_of_M_final for item in sublist]\n uber_M_final_std = [item for sublist in list_of_M_final_std for item in sublist]\n\n uber_M_final = np.array(uber_M_final)\n uber_M_final_std = np.array(uber_M_final_std)\n\n uber_M_final_linear_prediction = uber_M0 + \\\n self.create_linear_aproximation_prediction(H, first_proposal_Tokovnin)\n uber_M_final_std_linear_prediction = uber_M0_std + \\\n self.create_linear_aproximation_prediction(H_std, first_proposal_Tokovnin_std)\n\n if self.save:\n np.save(\n 
'/tigress/ncaplar/Results/uber_M_final_' + str(num_iter) + '_'\n + str(iteration_number), uber_M_final)\n np.save(\n '/tigress/ncaplar/Results/uber_M_final_std_' + str(num_iter) + '_'\n + str(iteration_number), uber_M_final_std)\n if self.save:\n np.save('/tigress/ncaplar/Results/uber_M_final_linear_prediction_'\n + str(num_iter) + '_' + str(iteration_number), uber_M_final_linear_prediction)\n np.save('/tigress/ncaplar/Results/uber_M_final_std_linear_prediction_'\n + str(num_iter) + '_' + str(iteration_number), uber_M_final_std_linear_prediction)\n\n ####\n # Seeing if there is an improvment\n # Quality measure is the sum of absolute differences of uber_I_std (all images/std)\n # and uber_M_final_std (all models / std)\n # how closely is that correlated with improvments in final_model_result?\n\n # non-std version\n # not used, that is ok, we are at the moment using std version\n IM_final = np.sum(np.abs(np.array(uber_I) - np.array(uber_M_final)))\n # std version\n IM_final_std = np.sum(np.abs(np.array(uber_I_std) - np.array(uber_M_final_std)))\n\n # linear prediction versions\n IM_final_linear_prediction = np.sum(\n np.abs(np.array(uber_I) - np.array(uber_M_final_linear_prediction)))\n # std version\n IM_final_std_linear_prediction = np.sum(\n np.abs(np.array(uber_I_std) - np.array(uber_M_final_std_linear_prediction)))\n\n # do a separate check on the improvment measure for the image in focus, when applicable\n if len(list_of_flux_mask) > 1:\n IM_final_focus = np.sum(\n np.abs(np.array(uber_I) - np.array(uber_M_final))[position_focus_1:position_focus_2])\n IM_final_std_focus = np.sum(\n np.abs(np.array(uber_I_std)\n - np.array(uber_M_final_std))[position_focus_1:position_focus_2])\n\n if self.verbosity >= 1:\n logging.info('I-M_start before iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_start))\n logging.info('I-M_final after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_final))\n logging.info('IM_final_linear_prediction after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_final_linear_prediction))\n if len(list_of_flux_mask) > 1:\n logging.info('I-M_start_focus before iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_start_focus))\n logging.info('I-M_final_focus after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_final_focus))\n\n logging.info('I_std-M_start_std after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_start_std))\n logging.info('I_std-M_final_std after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_final_std))\n logging.info('IM_final_std_linear_prediction after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_final_std_linear_prediction))\n if len(list_of_flux_mask) > 1:\n logging.info('I-M_start_focus_std before iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_start_std_focus))\n logging.info('I-M_final_focus_std after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(IM_final_std_focus))\n\n logging.info('Likelihood before iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(initial_model_result))\n logging.info('Likelihood after iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + 
str(final_model_result))\n\n logging.info(\n 'Likelihood before iteration '\n + str(iteration_number)\n + ' with num_iter '\n + str(num_iter)\n + ', per image: '\n + str(list_of_initial_model_result))\n logging.info(\n 'Likelihood after iteration '\n + str(iteration_number)\n + ' with num_iter '\n + str(num_iter)\n + ', per image: '\n + str(list_of_final_model_result))\n\n # logging.info('chi_2_after_iteration/chi_2_before_iteration '+\n # str(chi_2_after_iteration/chi_2_before_iteration ))\n logging.info('IM_final/IM_start with num_iter ' + str(num_iter)\n + ': ' + str(IM_final / IM_start))\n logging.info('IM_final_std/IM_start_std with num_iter '\n + str(num_iter) + ': ' + str(IM_final_std / IM_start_std))\n if len(list_of_flux_mask) > 1:\n logging.info('IM_final_focus/IM_start_focus with num_iter '\n + str(num_iter) + ': ' + str(IM_final_focus / IM_start_focus))\n logging.info('IM_final_std_focus/IM_start_std_focus with num_iter '\n + str(num_iter) + ': ' + str(IM_final_std_focus / IM_start_std_focus))\n\n logging.info('#########################################################')\n\n ##################\n # If improved take new parameters, if not dont\n\n # TEST, May18 2021\n # if more images, test that everything AND focus image has improved\n if len(list_of_flux_mask) > 1:\n if IM_final_std / IM_start_std < 1.0 and IM_final_std_focus / IM_start_std_focus < 1.25:\n condition_for_improvment = True\n else:\n condition_for_improvment = False\n else:\n # if you are having only one image\n if IM_final_std / IM_start_std < 1.0:\n condition_for_improvment = True\n\n if self.verbosity >= 1:\n logging.info('condition_for_improvment in iteration ' + str(iteration_number)\n + ' with num_iter ' + str(num_iter) + ': ' + str(condition_for_improvment))\n if condition_for_improvment:\n # when the quality measure did improve\n did_chi_2_improve = 1 # noqa\n number_of_non_decreses.append(0)\n if self.verbosity >= 1:\n logging.info('number_of_non_decreses:' + str(number_of_non_decreses))\n logging.info('current value of number_of_non_decreses is: '\n + str(np.sum(number_of_non_decreses)))\n logging.info('#########################################')\n logging.info('#########################################')\n else:\n # when the quality measure did not improve\n did_chi_2_improve = 0 # noqa\n # resetting all parameters\n if move_allparameters:\n all_wavefront_z_parametrization_new = np.copy(all_wavefront_z_parametrization_old)\n # all_global_parametrization_new = np.copy(all_global_parametrization_old)\n allparameters_parametrization_proposal_after_iteration = initial_input_parameterization\n else:\n all_wavefront_z_parametrization_new = np.copy(all_wavefront_z_parametrization_old)\n # chi_2_after_iteration=chi_2_before_iteration\n up_to_z22_end = all_wavefront_z_parametrization_new[:19 * 2]\n from_z22_start = all_wavefront_z_parametrization_new[19 * 2:]\n allparameters_parametrization_proposal_after_iteration = np.concatenate(\n (up_to_z22_start, nonwavefront_par, from_z22_start))\n thresh = thresh0\n number_of_non_decreses.append(1)\n if self.verbosity >= 1:\n logging.info('number_of_non_decreses:' + str(number_of_non_decreses))\n logging.info('current value of number_of_non_decreses is: '\n + str(np.sum(number_of_non_decreses)))\n logging.info('#######################################################')\n logging.info('#######################################################')\n\n final_model_result = initial_model_result\n list_of_final_model_result = list_of_initial_model_result\n list_of_image_final = 
pre_images\n allparameters_parametrization_proposal_after_iteration =\\\n allparameters_parametrization_proposal\n list_of_finalinput_parameters = list_of_initial_input_parameters\n list_of_after_chi2 = list_of_pre_chi2\n list_of_final_psf_positions = list_of_psf_positions\n\n if np.sum(number_of_non_decreses) == 1:\n if not return_Images:\n return final_model_result\n else:\n if previous_best_result is None:\n return initial_model_result, final_model_result,\\\n list_of_initial_model_result, list_of_final_model_result,\\\n out_images, pre_images, list_of_image_final,\\\n allparameters_parametrization_proposal,\\\n allparameters_parametrization_proposal_after_iteration,\\\n list_of_initial_input_parameters, list_of_finalinput_parameters,\\\n list_of_pre_chi2, list_of_after_chi2,\\\n list_of_psf_positions, list_of_final_psf_positions,\\\n [uber_images_normalized, uber_M0_std, H_std,\n array_of_delta_z_parametrizations_None, list_of_final_psf_positions]\n else:\n return initial_model_result, final_model_result,\\\n list_of_initial_model_result, list_of_final_model_result,\\\n out_images, pre_images, list_of_image_final,\\\n allparameters_parametrization_proposal,\\\n allparameters_parametrization_proposal_after_iteration,\\\n list_of_initial_input_parameters, list_of_finalinput_parameters,\\\n list_of_pre_chi2, list_of_after_chi2,\\\n list_of_psf_positions, list_of_final_psf_positions\n\n break\n\n # if return_Images==False just return the mean likelihood\n if not return_Images:\n return final_model_result\n else:\n # if you return images, return full\n if previous_best_result is None:\n # 0. likelihood averaged over all images (before the function)\n # 1. likelihood averaged over all images (after the function)\n # 2. likelihood per image (output from model_multi) (before the function)\n # 3. likelihood per image (output from model_multi) (after the function)\n # 4. out_images\n # 5. list of initial model images\n # 6. list of final model images\n # 7. parametrization before the function\n # 8. parametrization after the function\n # 9. list of parameters per image (before the function)\n # 10. list of parameters per image (after the function)\n # 11. list of chi2 per image (before the function)\n # 12. list of chi2 per image (after the function)\n # 13. list of psf position of image (before the function)\n # 14. 
list of psf position of image (after the function)\n return initial_model_result, final_model_result,\\\n list_of_initial_model_result, list_of_final_model_result,\\\n out_images, pre_images, list_of_image_final,\\\n allparameters_parametrization_proposal,\\\n allparameters_parametrization_proposal_after_iteration,\\\n list_of_initial_input_parameters, list_of_finalinput_parameters,\\\n list_of_pre_chi2, list_of_after_chi2,\\\n list_of_psf_positions, list_of_final_psf_positions,\\\n [uber_images_normalized, uber_M0_std, H_std,\n array_of_delta_z_parametrizations_None, list_of_final_psf_positions]\n # return final_model_result,list_of_final_model_result,out_images,list_of_image_final,\\\n # allparameters_parametrization_proposal_after_iteration,list_of_finalinput_parameters,\\\n # list_of_after_chi2,list_of_final_psf_positions,\\\n # [uber_images_normalized,uber_M0_std,H_std,array_of_delta_z_parametrizations_None,list_of_final_psf_positions]\n\n else:\n return initial_model_result, final_model_result,\\\n list_of_initial_model_result, list_of_final_model_result,\\\n out_images, pre_images, list_of_image_final,\\\n allparameters_parametrization_proposal,\\\n allparameters_parametrization_proposal_after_iteration,\\\n list_of_initial_input_parameters, list_of_finalinput_parameters,\\\n list_of_pre_chi2, list_of_after_chi2,\\\n list_of_psf_positions, list_of_final_psf_positions\n\n def create_simplified_H(self, previous_best_result, multi_background_factor=3):\n \"\"\"create matrix `H` using the provided (previously made) images and changes\n\n The simplification comes from the assumtion that the changes are still\n the same in this iteration as in the previous iteration\n\n Parameters\n ----------\n previous_best_result: `np.array?`\n The arrays with the output from ?\n\n Returns\n ----------\n H : `np.array`\n normalized difference between pixel values\n \"\"\"\n\n # to be compatable with version before 0.45 where previous_best_result\n # was actually only the last part of len=5\n #\n if len(previous_best_result) == 5:\n previous_best_result = previous_best_result\n else:\n # if you are passing the whole best result, separte the parts of the result\n main_body_of_best_result = previous_best_result[:-1]\n previous_best_result = previous_best_result[-1]\n\n # we need actual final model images from the previous best result\n # list_of_image_0_from_previous_best_result=main_body_of_best_result[6]\n # list_of_image_0=list_of_image_0_from_previous_best_result\n\n # we need actual initial model images from the previous best result\n # this will be used to evalute change of model due to changes in singel wavefront parameters\n # i.e., to estimate matrix H\n list_of_image_0_from_previous_best_result = main_body_of_best_result[5]\n list_of_image_0 = list_of_image_0_from_previous_best_result\n\n list_of_flux_mask = self.list_of_flux_mask\n\n ##########################################################################\n # divided model images by their standard deviations\n # list_of_image_0_std = []\n # for i in range(len(list_of_image_0)):\n # normalizing by standard deviation image\n # STD = np.sqrt(list_of_var_images[i])\n # image_0 = list_of_image_0[i]\n # list_of_image_0_std.append(image_0 / STD)\n\n ########################################################\n\n # mask model images at the start of this iteration, before modifying parameters\n # create uber_M0_previous_best - uber_M0 derived from previous best, but with current flux mask\n\n list_of_M0 = []\n # list_of_M0_std = []\n for i in 
range(len(list_of_image_0)):\n\n image_0 = list_of_image_0[i]\n # image_0_std = list_of_image_0_std[i]\n flux_mask = list_of_flux_mask[i]\n\n M0 = image_0[flux_mask].ravel()\n # M0=((image_0[flux_mask])/np.sum(image_0[flux_mask])).ravel()\n # M0_std = ((image_0_std[flux_mask]) / 1).ravel()\n # M0_std=((image_0_std[flux_mask])/np.sum(image_0_std[flux_mask])).ravel()\n\n list_of_M0.append(M0)\n # list_of_M0_std.append(M0_std)\n\n # join all M0,M0_std from invidiual images into one uber M0,M0_std\n uber_M0_previous_best = [item for sublist in list_of_M0 for item in sublist]\n # uber_M0_previous_best_std = [item for sublist in list_of_M0_std for item in sublist]\n\n uber_M0_previous_best = np.array(uber_M0_previous_best)\n # uber_M0_previous_best_std = np.array(uber_M0_previous_best_std)\n\n # uber_M0=uber_M0/np.sum(uber_M0)\n # uber_M0_std=uber_M0_std/np.sum(uber_M0_std)\n\n self.uber_M0_previous_best = uber_M0_previous_best\n # self.uber_M0_previous_best_std = uber_M0_previous_best_std\n\n ########################################################\n # uber_images_normalized_previous_best, but with current flux mask\n\n # previous uber images - not used\n # uber_images_normalized_previous_best_old_flux_mask=previous_best_result[0]\n # previous uber model - not used\n # uber_M0_previous_best_old_flux_mask=previous_best_result[1]\n\n # we need out images showing difference from original image due to\n # changing a single Zernike parameter\n out_images = main_body_of_best_result[4]\n\n # join all images together\n list_of_images_normalized_uber = []\n # list_of_images_normalized_std_uber = []\n # go over (zmax-3)*2 images\n for j in range(len(out_images)):\n # two steps for what could have been achived in one, but to ease up transition from previous code\n out_images_single_parameter_change = out_images[j]\n optpsf_list = out_images_single_parameter_change\n\n # flux image has to correct per image\n # mask images that have been created in the fitting procedure with the appropriate flux mask\n images_normalized = []\n for i in range(len(optpsf_list)):\n\n flux_mask = list_of_flux_mask[i]\n if j == 0:\n\n # logging.info('sum_flux_in images'+str([i,np.sum(flux_mask)]))\n pass\n images_normalized.append((optpsf_list[i][flux_mask]).ravel())\n # !old double-normalizing code\n # !images_normalized.append((optpsf_list[i][flux_mask]/np.sum(optpsf_list[i][flux_mask])).ravel())\n\n images_normalized_flat = [item for sublist in images_normalized for item in sublist]\n images_normalized_flat = np.array(images_normalized_flat)\n # images_normalized_flat=np.array(images_normalized_flat)/len(optpsf_list)\n\n # list of (zmax-3)*2 raveled images\n list_of_images_normalized_uber.append(images_normalized_flat)\n\n # same but divided by STD\n # images_normalized_std = []\n for i in range(len(optpsf_list)):\n # seems that I am a bit more verbose here with my definitions\n # optpsf_list_i = optpsf_list[i]\n\n # do I want to generate new STD images, from each image?\n # STD = list_of_sci_image_std[i]\n # optpsf_list_i_STD = optpsf_list_i / STD\n flux_mask = list_of_flux_mask[i]\n # images_normalized_std.append((optpsf_list_i_STD[flux_mask]/np.sum(optpsf_list_i_STD[flux_mask])).ravel())\n\n # join all images together\n # images_normalized_std_flat=[item for sublist in images_normalized_std for item in sublist]\n # normalize so that the sum is still one\n # images_normalized_std_flat=np.array(images_normalized_std_flat)/len(optpsf_list)\n\n # list_of_images_normalized_std_uber.append(images_normalized_std_flat)\n\n # 
create uber images_normalized,images_normalized_std\n # images that have zmax*2 rows and very large number of columns (number of\n # non-masked pixels from all N images)\n uber_images_normalized_previous_best = np.array(list_of_images_normalized_uber)\n\n ########################################################\n # current model image\n # uber_M0 = self.uber_M0\n\n # current change of the parameters\n # if self.move_allparameters:\n # array_of_delta_all_parametrizations = self.array_of_delta_all_parametrizations\n # else:\n array_of_delta_parametrizations = self.array_of_delta_z_parametrizations\n\n # previous uber model\n # uber_M0_previous_best=previous_best_result[1]\n # previous H (not used)\n # H_previous_best=previous_best_result[2]\n # how much has delta parametrizations changed in the previous result\n array_of_delta_parametrizations_None_previous_best = previous_best_result[3]\n\n # ratio between current parametrization and the previous (provided) changed parametrization\n ratio_of_parametrizations = (\n array_of_delta_parametrizations[:, None] / array_of_delta_parametrizations_None_previous_best)\n\n # create the array of how wavefront changes the uber_model by multiply the changes with new ratios\n array_of_wavefront_changes = np.transpose(\n ratio_of_parametrizations\n * np.array(uber_images_normalized_previous_best - uber_M0_previous_best)\n / (array_of_delta_parametrizations_None_previous_best))\n\n # difference between current model image and previous model image\n\n # logging.info('uber_images_normalized_previous_best.shape'+str(uber_images_normalized_previous_best.shape))\n # logging.info('uber_M0_previous_best.shape'+str(uber_M0_previous_best.shape))\n # logging.info('uber_M0.shape'+str(uber_M0.shape))\n\n # change between the initial model in this step and the imported model\n # global_change = uber_M0 - uber_M0_previous_best\n # global_change_proposed\n # global_change_proposed = (uber_M0 - uber_M0_previous_best) /\\\n # array_of_delta_parametrizations[:, None]\n\n # H is a change of wavefront\n H = array_of_wavefront_changes\n\n # H is a change of wavefront and change of model (is it?)\n # H=array_of_wavefront_changes + global_change[:,None]\n\n # np.save('/tigress/ncaplar/Results/global_change_'+str(2)+'_'+str(0),\\\n # global_change)\n\n # np.save('/tigress/ncaplar/Results/global_change_proposed_'+str(2)+'_'+str(0),\\\n # global_change_proposed)\n # np.save('/tigress/ncaplar/Results/array_of_wavefront_changes_'+str(2)+'_'+str(0),\\\n # array_of_wavefront_changes)\n # np.save('/tigress/ncaplar/Results/H_'+str(2)+'_'+str(0),\\\n # array_of_wavefront_changes)\n\n return H\n\n def create_first_proposal_Tokovnin(self, H, H_std, uber_I, uber_M0, uber_std, up_to_which_z=None):\n\n H_shape = H.shape\n\n # logging.info('H_shape'+str(H_shape))\n # logging.info('up_to_which_z:'+str(up_to_which_z))\n\n if up_to_which_z is not None:\n # H=H[:,1:(up_to_which_z-3)*2:2]\n # H_std=H_std[:,1:(up_to_which_z-3)*2:2]\n\n H = H[:, 0:(up_to_which_z - 3) * 2]\n H_std = H_std[:, 0:(up_to_which_z - 3) * 2]\n\n else:\n pass\n\n H = np.nan_to_num(H, 0)\n\n # logging.info('np.mean(H,axis=0).shape)'+str(np.mean(H,axis=0).shape))\n singlular_parameters = np.arange(H.shape[1])[np.abs((np.mean(H, axis=0))) < 0.001]\n non_singlular_parameters = np.arange(H.shape[1])[np.abs((np.mean(H, axis=0))) > 0.001]\n\n # logging.info('np.abs((np.mean(H,axis=0)))'+str(np.abs((np.mean(H,axis=0)))))\n # logging.info('non_singlular_parameters.shape)'+str(non_singlular_parameters.shape))\n # 
logging.info('singlular_parameters)'+str(singlular_parameters))\n\n H = H[:, non_singlular_parameters]\n H_std = H_std[:, non_singlular_parameters]\n\n HHt = np.matmul(np.transpose(H), H)\n HHt_std = np.matmul(np.transpose(H_std), H_std)\n # logging.info('svd thresh is '+str(thresh))\n # invHHt=svd_invert(HHt,thresh)\n # invHHt_std=svd_invert(HHt_std,thresh)\n invHHt = np.linalg.inv(HHt)\n invHHt_std = np.linalg.inv(HHt_std)\n\n invHHtHt = np.matmul(invHHt, np.transpose(H))\n invHHtHt_std = np.matmul(invHHt_std, np.transpose(H_std))\n\n # I is uber_I now (science images)\n # M0 is uber_M0 now (set of models before the iteration)\n first_proposal_Tokovnin = np.matmul(invHHtHt, uber_I - uber_M0)\n # first_proposal_Tokovnin_std=np.matmul(invHHtHt_std,uber_I_std-uber_M0_std)\n first_proposal_Tokovnin_std = np.matmul(invHHtHt_std, (uber_I - uber_M0) / uber_std.ravel())\n\n # logging.info('first_proposal_Tokovnin.shape before sing'+str(first_proposal_Tokovnin.shape))\n\n # if you have removed certain parameters because of the singularity, return them here, with no change\n if len(singlular_parameters) > 0:\n for i in range(len(singlular_parameters)):\n first_proposal_Tokovnin = np.insert(first_proposal_Tokovnin, singlular_parameters[i], 0)\n first_proposal_Tokovnin_std = np.insert(\n first_proposal_Tokovnin_std, singlular_parameters[i], 0)\n\n # logging.info('first_proposal_Tokovnin.shape after sing'+str(first_proposal_Tokovnin.shape))\n\n if up_to_which_z is not None:\n # H=H[:,1:(up_to_which_z-3)*2:2]\n # H_std=H_std[:,1:(up_to_which_z-3)*2:2]\n\n first_proposal_Tokovnin_0 = np.zeros((H_shape[1]))\n first_proposal_Tokovnin_0_std = np.zeros((H_shape[1]))\n # logging.info('first_proposal_Tokovnin_0.shape'+str(first_proposal_Tokovnin_0.shape))\n\n # logging.info('up_to_which_z: '+str(up_to_which_z))\n # logging.info('first_proposal_Tokovnin:' +str(first_proposal_Tokovnin))\n\n # logging.info(first_proposal_Tokovnin_0[0:(up_to_which_z-3)*2].shape)\n # logging.info(first_proposal_Tokovnin.shape)\n first_proposal_Tokovnin_0[0:(up_to_which_z - 3) * 2] = first_proposal_Tokovnin\n first_proposal_Tokovnin_0_std[0:(up_to_which_z - 3) * 2] = first_proposal_Tokovnin_std\n\n first_proposal_Tokovnin = first_proposal_Tokovnin_0\n first_proposal_Tokovnin_std = first_proposal_Tokovnin_0_std\n else:\n pass\n\n return first_proposal_Tokovnin, first_proposal_Tokovnin_std\n\n def create_linear_aproximation_prediction(self, H, first_proposal_Tokovnin):\n return np.dot(H, first_proposal_Tokovnin)\n\n def __call__(self, allparameters_parametrization_proposal, return_Images=True, num_iter=None,\n previous_best_result=None, use_only_chi=False,\n multi_background_factor=3, up_to_which_z=None):\n\n return self.Tokovinin_algorithm_chi_multi(\n allparameters_parametrization_proposal,\n return_Images=return_Images,\n num_iter=num_iter,\n previous_best_result=previous_best_result,\n use_only_chi=use_only_chi,\n multi_background_factor=int(multi_background_factor),\n up_to_which_z=up_to_which_z)\n\n\nclass LN_PFS_single(object):\n\n \"\"\"!\n\n Class to compute likelihood of the donut image, given the sci and var image\n Also the prinicpal way to get the images via ``return_Image'' option\n\n model = LN_PFS_single(sci_image,var_image,pupil_parameters=pupil_parameters,\n use_pupil_parameters=None,zmax=zmax,save=1)\n def model_return(allparameters_proposal):\n return model(allparameters_proposal,return_Image=True)\n\n Calls ZernikeFitterPFS class (constructModelImage_PFS_naturalResolution function)\n in order to create 
images\n\n Called by LN_PFS_multi_same_spot\n\n \"\"\"\n\n def __init__(self, sci_image, var_image,\n mask_image=None,\n wavelength=None, dithering=None, save=None, verbosity=None,\n pupil_parameters=None, use_pupil_parameters=None, use_optPSF=None, use_wf_grid=None,\n zmax=None, extraZernike=None, pupilExplicit=None, simulation_00=None,\n double_sources=None, double_sources_positions_ratios=None, npix=None,\n fit_for_flux=None, test_run=None, explicit_psf_position=None,\n use_only_chi=False, use_center_of_flux=False):\n \"\"\"\n @param sci_image science image, 2d array\n @param var_image variance image, 2d array,same size as sci_image\n @param mask_image mask image, 2d array,same size as sci_image\n @param dithering dithering, 1=normal, 2=two times higher resolution,\n 3=not supported\n @param save save intermediate result in the process\n (set value at 1 for saving)\n @param verbosity verbosity of the process\n (set value at 1 for full output)\n @param pupil_parameters\n @param use_pupil_parameters\n @param use_optPSF\n @param zmax largest Zernike order used (11 or 22)\n @param extraZernike array consistingin of higher order zernike\n (if using higher order than 22)\n @param pupilExplicit\n @param simulation_00 resulting image will be centered with optical center\n in the center of the image\n and not fitted acorrding to the sci_image\n if use_center_of_flux==True, gives with center\n of the flux in the center of the image\n @param double_sources 1 if there are other secondary sources in the image\n @param double_sources_positions_ratios / arrray with parameters describing relative position\\\n and relative flux of the secondary source(s)\n @param npxix size of the pupil\n\n @param fit_for_flux automatically fit for the best flux level\n that minimizes the chi**2\n @param test_run if True, skips the creation of model\n and return science image - useful for testing\n interaction of outputs of the module\n in broader setting quickly\n @param explicit_psf_position gives position of the opt_psf\n \"\"\"\n\n if verbosity is None:\n verbosity = 0\n\n if use_pupil_parameters is not None:\n assert pupil_parameters is not None\n\n if double_sources is not None and bool(double_sources) is not False:\n assert np.sum(np.abs(double_sources_positions_ratios)) > 0\n\n if zmax is None:\n zmax = 11\n\n if zmax == 11:\n self.columns = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_ilum_radius',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n if zmax >= 22:\n self.columns = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'z12',\n 'z13',\n 'z14',\n 'z15',\n 'z16',\n 'z17',\n 'z18',\n 'z19',\n 'z20',\n 'z21',\n 'z22',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_ilum_radius',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n\n if mask_image is None:\n mask_image = np.zeros(sci_image.shape)\n self.mask_image = mask_image\n self.sci_image = sci_image\n self.var_image = var_image\n\n 
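# Comment-only sketch (not executed) of the flux-variance model used throughout this class:\n        # the quadratic fit produced just below by create_popt_for_custom_var is later evaluated\n        # on model images via create_custom_var_from_popt. The names a, b, c are illustrative\n        # labels for the entries of the returned popt:\n        #   sci_pixels = sci_image[mask_image == 0].ravel()\n        #   var_pixels = var_image[mask_image == 0].ravel()\n        #   a, b, c = popt  # from scipy.optimize.curve_fit of p[0] * x**2 + p[1] * x + p[2]\n        #   custom_var_image = a * model_image**2 + b * model_image + c\n        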
popt_for_custom_var_image = self.create_popt_for_custom_var(sci_image, var_image, mask_image)\n self.popt_for_custom_var_image = popt_for_custom_var_image\n\n self.dithering = dithering\n self.pupil_parameters = pupil_parameters\n self.use_pupil_parameters = use_pupil_parameters\n self.use_optPSF = use_optPSF\n self.pupilExplicit = pupilExplicit\n self.simulation_00 = simulation_00\n if self.simulation_00:\n self.simulation_00 = 1\n\n self.zmax = zmax\n self.extraZernike = extraZernike\n self.verbosity = verbosity\n self.double_sources = double_sources\n self.double_sources_positions_ratios = double_sources_positions_ratios\n self.fit_for_flux = fit_for_flux\n if test_run is None:\n self.test_run = False\n else:\n self.test_run = test_run\n self.explicit_psf_position = explicit_psf_position\n\n # if npix is not specified automatically scale the image\n # this will create images which will have different pupil size for different sizes of science image\n # and this will influence the results\n if npix is None:\n if dithering is None or dithering == 1:\n npix = int(math.ceil(int(1024 * sci_image.shape[0] / (20 * 4))) * 2)\n else:\n npix = int(math.ceil(int(1024 * sci_image.shape[0] / (20 * 4 * self.dithering))) * 2)\n else:\n self.npix = npix\n\n if verbosity == 1:\n logging.info('Science image shape is: ' + str(sci_image.shape))\n logging.info('Top left pixel value of the science image is: ' + str(sci_image[0][0]))\n logging.info('Variance image shape is: ' + str(sci_image.shape))\n logging.info('Top left pixel value of the variance image is: ' + str(var_image[0][0]))\n logging.info('Mask image shape is: ' + str(sci_image.shape))\n logging.info('Sum of mask image is: ' + str(np.sum(mask_image)))\n logging.info('Dithering value is: ' + str(dithering))\n logging.info('')\n\n logging.info('explicit_psf_position in LN_PFS_single: '+str(explicit_psf_position))\n logging.info('supplied extra Zernike parameters (beyond zmax): ' + str(extraZernike))\n\n \"\"\"\n parameters that go into ZernikeFitterPFS\n def __init__(self,image=None,image_var=None,image_mask=None,pixelScale=None,wavelength=None,\n diam_sic=None,npix=None,pupilExplicit=None,\n wf_full_Image=None,radiometricEffectArray_Image=None,\n ilum_Image=None,dithering=None,save=None,\n pupil_parameters=None,use_pupil_parameters=None,\n use_optPSF=None,use_wf_grid=None,\n zmaxInit=None,extraZernike=None,simulation_00=None,verbosity=None,\n double_sources=None,double_sources_positions_ratios=None,\n test_run=None,explicit_psf_position=None,*args):\n \"\"\"\n\n # how are these two approaches different?\n if pupil_parameters is None:\n single_image_analysis = ZernikeFitterPFS(\n sci_image,\n var_image,\n image_mask=mask_image,\n wavelength=wavelength,\n npix=npix,\n pupilExplicit=pupilExplicit,\n wf_full_Image=None,\n ilum_Image=None,\n dithering=dithering,\n save=save,\n pupil_parameters=pupil_parameters,\n use_pupil_parameters=use_pupil_parameters,\n use_optPSF=use_optPSF,\n use_wf_grid=use_wf_grid,\n zmaxInit=zmax,\n extraZernike=extraZernike,\n simulation_00=self.simulation_00,\n verbosity=verbosity,\n double_sources=double_sources,\n double_sources_positions_ratios=double_sources_positions_ratios,\n test_run=test_run,\n explicit_psf_position=explicit_psf_position,\n use_only_chi=use_only_chi,\n use_center_of_flux=use_center_of_flux)\n single_image_analysis.initParams(zmax)\n self.single_image_analysis = single_image_analysis\n else:\n single_image_analysis = ZernikeFitterPFS(\n sci_image,\n var_image,\n image_mask=mask_image,\n npix=npix,\n 
dithering=dithering,\n save=save,\n pupil_parameters=pupil_parameters,\n use_pupil_parameters=use_pupil_parameters,\n extraZernike=extraZernike,\n simulation_00=self.simulation_00,\n verbosity=verbosity,\n double_sources=double_sources,\n double_sources_positions_ratios=double_sources_positions_ratios,\n test_run=test_run,\n explicit_psf_position=explicit_psf_position,\n use_only_chi=use_only_chi,\n use_center_of_flux=use_center_of_flux)\n\n single_image_analysis.initParams(\n zmax,\n detFracInit=pupil_parameters[0],\n strutFracInit=pupil_parameters[1],\n focalPlanePositionInit=(\n pupil_parameters[2],\n pupil_parameters[3]),\n slitFracInit=pupil_parameters[4],\n slitFrac_dy_Init=pupil_parameters[5],\n x_fiberInit=pupil_parameters[6],\n y_fiberInit=pupil_parameters[7],\n effective_ilum_radiusInit=pupil_parameters[8],\n frd_sigmaInit=pupil_parameters[9],\n det_vertInit=pupil_parameters[10],\n slitHolder_frac_dxInit=pupil_parameters[11],\n wide_0Init=pupil_parameters[12],\n wide_23Init=pupil_parameters[13],\n wide_43Init=pupil_parameters[14],\n misalignInit=pupil_parameters[15])\n\n self.single_image_analysis = single_image_analysis\n\n def create_popt_for_custom_var(self, sci_image, var_image, mask_image=None):\n \"\"\"Create 2nd order poly fit; to be used in creation of custom var image\n\n TODO: same function in LN_PFS_Single... Very unsatifactory!\n\n The connection between variance and flux is determined from the provided science image\n and variance image.\n All of inputs have to be 2d np.arrays with same size.\n Introduced in 0.50 (PIPE2D-931)\n\n Called by Tokovinin_algorithm_chi_multi\n\n Parameters\n ----------\n sci_image : `np.array`\n Scientific array\n var_image : `np.array`\n Variance array\n mask_image : `np.array`\n Mask image\n\n Returns\n ----------\n custom_var_image : `np.array`\n Recreated variance map\n\n \"\"\"\n if mask_image is None:\n sci_pixels = sci_image.ravel()\n var_pixels = var_image.ravel()\n else:\n sci_pixels = sci_image[mask_image == 0].ravel()\n var_pixels = var_image[mask_image == 0].ravel()\n # z = np.polyfit(sci_pixels, var_pixels, deg=2)\n # if z[0] < 0:\n # z = np.polyfit(sci_pixels, var_pixels, deg=1)\n # p1 = np.poly1d(z)\n # custom_var_image = p1(sci_image)\n\n # I am using lambda expression to avoid superflous definition of quadratic function\n f = lambda x, *p: p[0] * x**2 + p[1] * x + p[2] # noqa : E373\n popt, pcov = scipy.optimize.curve_fit(f, sci_pixels, var_pixels, [0, 0, np.min(var_pixels)],\n bounds=([-np.inf, -np.inf, np.min(var_pixels)],\n [np.inf, np.inf, np.inf]))\n return popt\n\n def create_custom_var_from_popt(self, model_image, popt):\n \"\"\"Creates variance map from the model image, given the 2nd poly fit parameters\n\n Introduced in 0.50 (PIPE2D-931)\n\n Parameters\n ----------\n modelImg : `np.array`\n Model image\n popt : `np.array`\n 2d polyfit parameters\n Returns\n ----------\n custom_var_image : `np.array`\n Recreated variance map\n \"\"\"\n\n # I am using lambda expression to avoid superflous definition of quadratic function\n f = lambda x, *p: p[0] * x**2 + p[1] * x + p[2] # noqa : E373\n custom_var_image = f(model_image, *popt)\n return custom_var_image\n\n def create_custom_var(self, model_image, sci_image, var_image, mask_image):\n \"\"\"Creates variance map from the model image, sci, var and mask_image\n\n Introduced in 0.50 (PIPE2D-931)\n\n Parameters\n ----------\n modelImg : `np.array`\n Model image\n ...\n Returns\n ----------\n custom_var_image : `np.array`\n Recreated variance map\n \"\"\"\n\n 
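# Hypothetical usage sketch (comments only, not part of the original documentation):\n        #   custom_var = self.create_custom_var(model_image, sci_image, var_image, mask_image)\n        # is equivalent to the two steps below - fit the quadratic var(flux) relation on the\n        # provided science/variance pair, then evaluate that relation on the model image.\n        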
popt_for_custom_var = self.create_popt_for_custom_var(sci_image, var_image, mask_image)\n custom_var_image = self.create_custom_var_from_popt(model_image, popt_for_custom_var)\n return custom_var_image\n\n def create_chi_2_almost(\n self,\n modelImg,\n sci_image,\n var_image,\n mask_image,\n use_only_chi=False,\n multi_background_factor=3):\n \"\"\"Create values describing the quality of the fit\n\n Parameters\n ----------\n modelImg : `np.array`\n Model image\n sci_image : `np.array`\n Scientific image\n var_image : `np.array`\n Variance image\n mask_image : `np.array`\n Mask image\n use_only_chi : `bool`\n If True, the program is reporting np.abs(chi), not chi^2\n\n\n Returns\n ----------\n (5 values) : `list`\n 0. normal chi**2\n 1. what is 'instrinsic' chi**2, i.e., just sum((scientific image)**2/variance)\n 2. 'Q' value = sum(abs(model - scientific image))/sum(scientific image)\n 3. chi**2 reduced\n 4. chi**2 reduced 'intrinsic'\n\n The descriptions below are applicable when use_only_chi = False\n \"\"\"\n\n try:\n if sci_image.shape[0] == 20:\n multi_background_factor = 3\n\n mean_value_of_background_via_var = np.mean([np.median(var_image[0]), np.median(\n var_image[-1]), np.median(var_image[:, 0]), np.median(var_image[:, -1])])\\\n * multi_background_factor\n\n mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]), np.median(\n sci_image[-1]), np.median(sci_image[:, 0]), np.median(sci_image[:, -1])])\\\n * multi_background_factor\n\n mean_value_of_background = np.max(\n [mean_value_of_background_via_var, mean_value_of_background_via_sci])\n\n flux_mask = sci_image > (mean_value_of_background)\n inverted_flux_mask = flux_mask.astype(bool)\n except BaseException:\n inverted_flux_mask = np.ones(sci_image.shape)\n\n # array that has True for values which are good and False for bad values\n inverted_mask = ~mask_image.astype(bool)\n\n # strengthen the mask by taking in the account only bright pixels, which have passed the flux cut\n inverted_mask = inverted_mask * inverted_flux_mask\n\n use_custom_var = True\n if use_custom_var:\n # logging.info('Test checkpoint 1')\n # logging.info('modelImg.shape'+str(modelImg.shape))\n # logging.info('modelImg[0][0:5]'+str(modelImg[0][0:5]))\n # logging.info('sci_image.shape'+str(sci_image.shape))\n # logging.info('sci_image[0][0:5]'+str(sci_image[0][0:5]))\n # logging.info('var_image.shape'+str(var_image.shape))\n # logging.info('var_image[0][0:5]'+str(var_image[0][0:5]))\n # logging.info('mask_image.shape'+str(mask_image.shape))\n # logging.info('mask_image[0][0:5]'+str(mask_image[0][0:5]))\n custom_var_image = self.create_custom_var_from_popt(modelImg, self.popt_for_custom_var_image)\n # logging.info('custom_var_image[0][0:5]'+str(custom_var_image[0][0:5]))\n # overload var_image with newly created image\n var_image = custom_var_image\n\n # apply the mask on all of the images (sci, var and model)\n var_image_masked = var_image * inverted_mask\n sci_image_masked = sci_image * inverted_mask\n modelImg_masked = modelImg * inverted_mask\n\n # logging.info('First 5 values are: '+str(var_image_masked[0:5]))\n\n # sigma values\n sigma_masked = np.sqrt(var_image_masked)\n\n # chi array\n chi = (sci_image_masked - modelImg_masked) / sigma_masked\n # chi intrinsic, i.e., without subtracting model\n chi_intrinsic = (sci_image_masked / sigma_masked)\n\n # ravel and remove bad values\n chi_without_nan = chi.ravel()[~np.isnan(chi.ravel())]\n chi_intrinsic_without_nan = chi_intrinsic.ravel()[~np.isnan(chi_intrinsic.ravel())]\n\n if not 
use_only_chi:\n # If you are computing chi**2, square it\n chi2_res = (chi_without_nan)**2\n chi2_intrinsic_res = (chi_intrinsic_without_nan)**2\n else:\n # If you are just using chi, do not square it\n # keep the names the same, but very careful as they are not squared quantities\n chi2_res = np.abs(chi_without_nan)**1\n chi2_intrinsic_res = np.abs(chi_intrinsic_without_nan)**1\n\n # logging.info('use_only_chi variable in create_chi_2_almost is: '+str(use_only_chi))\n # logging.info('chi2_res '+str(np.sum(chi2_res)))\n # logging.info('chi2_intrinsic_res '+str(np.sum(chi2_intrinsic_res)))\n\n # calculates 'Q' values\n Qlist = np.abs((sci_image_masked - modelImg_masked))\n Qlist_without_nan = Qlist.ravel()[~np.isnan(Qlist.ravel())]\n sci_image_without_nan = sci_image_masked.ravel()[~np.isnan(sci_image_masked.ravel())]\n Qvalue = np.sum(Qlist_without_nan) / np.sum(sci_image_without_nan)\n\n # return the result\n return [\n np.sum(chi2_res),\n np.sum(chi2_intrinsic_res),\n Qvalue,\n np.mean(chi2_res),\n np.mean(chi2_intrinsic_res)]\n\n def lnlike_Neven(\n self,\n allparameters,\n return_Image=False,\n return_intermediate_images=False,\n use_only_chi=False,\n multi_background_factor=3):\n \"\"\"Report `likelihood` given the parameters of the model\n\n The algorithm gives -np.inf if one of the parameters is outside of the specified range\n (which are pecified below)\n\n Parameters\n ----------\n allparameters : `np.array`\n Model image\n return_Image : `bool\n explanation\n return_intermediate_images : `bool`\n explanation\n use_only_chi : `bool`\n explanation\n multi_background_factor : `float\n explanation\n\n Parameters\n ----------\n (if not return_Image)\n res : `float`\n `Likelihood` of the fit\n psf_position : `np.array`\n Position at which model has been centered\n (if return_Image==True)\n res : `float\n `Likelihood` of the fit\n modelImg : `np.array`\n Model image\n allparameters : `np.array`\n Parameters describing the model\n (quality) : `list`\n 0. chi_2_almost : `float`\n chi2/chi total\n 1. chi_2_almost_max : `float`\n Total chi2/chi without subtracting the model\n 2. chi_2_almost_dof : `float\n chi2/chi total divided by the number of pixels\n 3. chi_2_almost_max_dof : `float`\n Total chi2/chi without subtracting the model, divided by the number of pixels\n psf_position : `np.array`\n Position at which model has been centered\n (if return_intermediate_images)\n res : `float\n `Likelihood` of the fit\n modelImg : `np.array`\n Model image\n allparameters : `np.array`\n Parameters describing the model\n ilum : `np.array`\n Illumination of the pupil\n wf_grid_rot : `np.array`\n Wavfefront across the pupil\n (quality) : `list`\n 0. chi_2_almost : `float`\n chi2/chi total\n 1. chi_2_almost_max : `float`\n Total chi2/chi without subtracting the model\n 2. chi_2_almost_dof : `float\n chi2/chi total divided by the number of pixels\n 3. 
chi_2_almost_max_dof : `float`\n Total chi2/chi without subtracting the model, divided by the number of pixels\n psf_position : `np.array`\n Position at which model has been centered\n \"\"\"\n\n time_lnlike_start = time.time()\n\n if self.verbosity == 1:\n logging.info('')\n logging.info('Entering lnlike_Neven')\n logging.info('allparameters ' + str(allparameters))\n\n if self.pupil_parameters is not None:\n if len(allparameters) < 25:\n allparameters = add_pupil_parameters_to_all_parameters(allparameters, self.pupil_parameters)\n else:\n allparameters = add_pupil_parameters_to_all_parameters(\n remove_pupil_parameters_from_all_parameters(allparameters), self.pupil_parameters)\n\n if self.zmax <= 22:\n zmax_number = self.zmax - 3\n else:\n zmax_number = 19\n zparameters = allparameters[0:zmax_number]\n\n globalparameters = allparameters[len(zparameters):len(zparameters) + 23]\n\n # if self.fit_for_flux==True:\n # globalparameters=np.concatenate((globalparameters,np.array([1])))\n\n # internal parameter for debugging change value to 1 to see which parameters are failling\n test_print = 0\n if self.verbosity == 1:\n test_print = 1\n\n # When running big fits these are limits which ensure\n # that the code does not wander off in totally non physical region\n # det frac\n if globalparameters[0] < 0.6 or globalparameters[0] > 0.8:\n logging.info('globalparameters[0] outside limits; value: '\n + str(globalparameters[0])) if test_print == 1 else False\n return -np.inf\n\n # strut frac\n if globalparameters[1] < 0.07 or globalparameters[1] > 0.13:\n logging.info('globalparameters[1] outside limits') if test_print == 1 else False\n return -np.inf\n\n # slit_frac < strut frac\n # if globalparameters[4]<globalparameters[1]:\n # logging.info('globalparameters[1] not smaller than 4 outside limits')\n # return -np.inf\n\n # dx Focal\n if globalparameters[2] > 0.4:\n logging.info('globalparameters[2] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[2] < -0.4:\n logging.info('globalparameters[2] outside limits') if test_print == 1 else False\n return -np.inf\n\n # dy Focal\n if globalparameters[3] > 0.4:\n logging.info('globalparameters[3] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[3] < -0.4:\n logging.info('globalparameters[3] outside limits') if test_print == 1 else False\n return -np.inf\n\n # slitFrac\n if globalparameters[4] < 0.05:\n logging.info('globalparameters[4] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[4] > 0.09:\n logging.info('globalparameters[4] outside limits') if test_print == 1 else False\n return -np.inf\n\n # slitFrac_dy\n if globalparameters[5] < -0.5:\n logging.info('globalparameters[5] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[5] > 0.5:\n logging.info('globalparameters[5] outside limits') if test_print == 1 else False\n return -np.inf\n\n # wide_0\n if globalparameters[6] < 0:\n logging.info('globalparameters[6] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[6] > 1:\n logging.info('globalparameters[6] outside limits') if test_print == 1 else False\n return -np.inf\n\n # wide_23\n if globalparameters[7] < 0:\n logging.info('globalparameters[7] outside limits') if test_print == 1 else False\n return -np.inf\n # changed in w_23\n if globalparameters[7] > 1:\n logging.info('globalparameters[7] outside limits') if test_print == 1 else False\n return -np.inf\n\n # wide_43\n if 
globalparameters[8] < 0:\n logging.info('globalparameters[8] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[8] > 1:\n logging.info('globalparameters[8] outside limits') if test_print == 1 else False\n return -np.inf\n\n # misalign\n if globalparameters[9] < 0:\n logging.info('globalparameters[9] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[9] > 12:\n logging.info('globalparameters[9] outside limits') if test_print == 1 else False\n return -np.inf\n\n # x_fiber\n if globalparameters[10] < -0.4:\n logging.info('globalparameters[10] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[10] > 0.4:\n logging.info('globalparameters[10] outside limits') if test_print == 1 else False\n return -np.inf\n\n # y_fiber\n if globalparameters[11] < -0.4:\n logging.info('globalparameters[11] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[11] > 0.4:\n logging.info('globalparameters[11] outside limits') if test_print == 1 else False\n return -np.inf\n\n # effective_radius_illumination\n if globalparameters[12] < 0.7:\n logging.info('globalparameters[12] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[12] > 1.0:\n logging.info('globalparameters[12] outside limits with value '\n + str(globalparameters[12])) if test_print == 1 else False\n return -np.inf\n\n # frd_sigma\n if globalparameters[13] < 0.01:\n logging.info('globalparameters[13] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[13] > .4:\n logging.info('globalparameters[13] outside limits') if test_print == 1 else False\n return -np.inf\n\n # frd_lorentz_factor\n if globalparameters[14] < 0.01:\n logging.info('globalparameters[14] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[14] > 1:\n logging.info('globalparameters[14] outside limits') if test_print == 1 else False\n return -np.inf\n\n # det_vert\n if globalparameters[15] < 0.85:\n logging.info('globalparameters[15] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[15] > 1.15:\n logging.info('globalparameters[15] outside limits') if test_print == 1 else False\n return -np.inf\n\n # slitHolder_frac_dx\n if globalparameters[16] < -0.8:\n logging.info('globalparameters[16] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[16] > 0.8:\n logging.info('globalparameters[16] outside limits') if test_print == 1 else False\n return -np.inf\n\n # grating_lines\n if globalparameters[17] < 1200:\n logging.info('globalparameters[17] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[17] > 120000:\n logging.info('globalparameters[17] outside limits') if test_print == 1 else False\n return -np.inf\n\n # scattering_slope\n if globalparameters[18] < 1.5:\n logging.info('globalparameters[18] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[18] > +3.0:\n logging.info('globalparameters[18] outside limits') if test_print == 1 else False\n return -np.inf\n\n # scattering_amplitude\n if globalparameters[19] < 0:\n logging.info('globalparameters[19] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[19] > +0.4:\n logging.info('globalparameters[19] outside limits') if test_print == 1 else False\n return -np.inf\n\n # pixel_effect\n if globalparameters[20] < 0.15:\n 
logging.info('globalparameters[20] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[20] > +0.8:\n logging.info('globalparameters[20] outside limits') if test_print == 1 else False\n return -np.inf\n\n # fiber_r\n if globalparameters[21] < 1.74:\n logging.info('globalparameters[21] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[21] > +1.98:\n logging.info('globalparameters[21] outside limits') if test_print == 1 else False\n return -np.inf\n\n # flux\n if self.fit_for_flux:\n globalparameters[22] = 1\n else:\n if globalparameters[22] < 0.98:\n logging.info('globalparameters[22] outside limits') if test_print == 1 else False\n return -np.inf\n if globalparameters[22] > 1.02:\n logging.info('globalparameters[22] outside limits') if test_print == 1 else False\n return -np.inf\n\n x = self.create_x(zparameters, globalparameters)\n\n for i in range(len(self.columns)):\n self.single_image_analysis.params[self.columns[i]].set(x[i])\n\n if len(allparameters) > len(self.columns):\n if self.verbosity == 1:\n logging.info('We are going higher than Zernike 22!')\n extra_Zernike_parameters = allparameters[len(self.columns):]\n if self.verbosity == 1:\n logging.info('extra_Zernike_parameters ' + str(extra_Zernike_parameters))\n else:\n extra_Zernike_parameters = None\n if self.verbosity == 1:\n logging.info('No extra Zernike (beyond zmax)')\n\n # if it is not a test run, run the actual code\n if not self.test_run:\n # this try statment avoids code crashing when code tries to analyze weird\n # combination of parameters which fail to produce an image\n try:\n if not return_intermediate_images:\n modelImg, psf_position =\\\n self.single_image_analysis.constructModelImage_PFS_naturalResolution(\n self.single_image_analysis.params, extraZernike=extra_Zernike_parameters,\n return_intermediate_images=return_intermediate_images)\n if return_intermediate_images:\n modelImg, ilum, wf_grid_rot, psf_position =\\\n self.single_image_analysis.constructModelImage_PFS_naturalResolution(\n self.single_image_analysis.params, extraZernike=extra_Zernike_parameters,\n return_intermediate_images=return_intermediate_images)\n except IndexError:\n return -np.inf, -np.inf\n else:\n randomizer_array = np.random.randn(self.sci_image.shape[0], self.sci_image.shape[1]) / 100 + 1\n if not return_intermediate_images:\n\n modelImg = self.sci_image * randomizer_array\n psf_position = [0, 0]\n\n if self.verbosity == 1:\n logging.info('Careful - the model image is created in a test_run')\n else:\n # ilum_test=np.ones((3072,3072))\n ilum_test = np.ones((30, 30))\n\n # wf_grid_rot_test=np.ones((3072,3072))\n wf_grid_rot_test = np.ones((30, 30))\n\n psf_position_test = [0, 0]\n\n modelImg, ilum, wf_grid_rot, psf_position = self.sci_image * \\\n randomizer_array, ilum_test, wf_grid_rot_test, psf_position_test\n if self.verbosity == 1:\n logging.info('Careful - the model image is created in a test_run')\n logging.info('test run with return_intermediate_images==True!')\n\n # if image is in focus, which at the moment is size of post stamp image of 20 by 20\n # logging.info('self.sci_image.shape[0]'+str(self.sci_image.shape[0]))\n if self.sci_image.shape[0] == 20:\n # apply the procedure from\n # https://github.com/Subaru-PFS/drp_stella/blob/master/python/pfs/drp/stella/subtractSky2d.py\n # `image` from the pipeline is `sci_image` here\n # `psfImage` from the pipeline is `modelImg` here\n # `image.mask` from the pipeline is `mask_image` here\n # `image.variance` from the 
pipeline is `var_image` here\n\n inverted_mask = ~self.mask_image.astype(bool)\n\n modelDotModel = np.sum(modelImg[inverted_mask]**2)\n modelDotData = np.sum(modelImg[inverted_mask] * self.sci_image[inverted_mask])\n # modelDotModelVariance = np.sum(modelImg[inverted_mask]**2 * self.var_image[inverted_mask])\n flux = modelDotData / modelDotModel\n # fluxErr = np.sqrt(modelDotModelVariance) / modelDotModel\n\n modelImg = modelImg * flux\n if self.verbosity == 1:\n logging.info('Image in focus, using pipeline normalization;\\\n multiplying all values in the model by ' + str(flux))\n\n else:\n\n if self.fit_for_flux:\n if self.verbosity == 1:\n logging.info('Internally fitting for flux; disregarding passed value for flux')\n\n def find_flux_fit(flux_fit):\n return self.create_chi_2_almost(\n flux_fit * modelImg,\n self.sci_image,\n self.var_image,\n self.mask_image,\n use_only_chi=use_only_chi)[0]\n\n flux_fitting_result = scipy.optimize.shgo(find_flux_fit, bounds=[(0.98, 1.02)], iters=6)\n flux = flux_fitting_result.x[0]\n if len(allparameters) == 42:\n allparameters[-1] = flux\n if len(allparameters) == 41:\n allparameters = np.concatenate((allparameters, np.array([flux])))\n else:\n # logging.info('here')\n # logging.info(allparameters[41])\n if (allparameters[41] < 1.1) and (allparameters[41] > 0.9):\n allparameters[41] = flux\n else:\n pass\n # logging.info('flux: '+str(flux))\n # logging.info(len(allparameters))\n # logging.info(allparameters)\n\n modelImg = modelImg * flux\n if self.verbosity == 1:\n logging.info('Internally fitting for flux; multiplying all values in the model by '\n + str(flux))\n else:\n pass\n\n # returns 0. chi2 value, 1. chi2_max value, 2. Qvalue, 3. chi2/d.o.f., 4. chi2_max/d.o.f.\n chi_2_almost_multi_values = self.create_chi_2_almost(\n modelImg,\n self.sci_image,\n self.var_image,\n self.mask_image,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n chi_2_almost = chi_2_almost_multi_values[0]\n chi_2_almost_max = chi_2_almost_multi_values[1]\n chi_2_almost_dof = chi_2_almost_multi_values[3]\n chi_2_almost_max_dof = chi_2_almost_multi_values[4]\n\n # res stands for ``result''\n if not use_only_chi:\n # reporting likelihood in chi^2 case\n res = -(1 / 2) * (chi_2_almost + np.sum(np.log(2 * np.pi * self.var_image)))\n else:\n # reporting np.abs(chi) per d.o.f.\n res = -(1 / 1) * (chi_2_almost_dof)\n\n time_lnlike_end = time.time()\n if self.verbosity:\n logging.info('Finished with lnlike_Neven')\n if not use_only_chi:\n logging.info('chi_2_almost/d.o.f is ' + str(chi_2_almost_dof)\n + '; chi_2_almost_max_dof is ' + str(chi_2_almost_max_dof)\n + '; log(improvment) is '\n + str(np.log10(chi_2_almost_dof / chi_2_almost_max_dof)))\n else:\n logging.info('chi_almost/d.o.f is ' + str(chi_2_almost_dof)\n + '; chi_almost_max_dof is ' + str(chi_2_almost_max_dof)\n + '; log(improvment) is '\n + str(np.log10(chi_2_almost_dof / chi_2_almost_max_dof)))\n\n logging.info('The `likelihood` reported is: ' + str(res))\n # logging.info('multiprocessing.current_process() ' +\n # str(current_process()) + ' thread ' + str(threading.get_ident()))\n # logging.info(str(platform.uname()))\n logging.info('Time for lnlike_Neven function in thread ' + str(threading.get_ident())\n + ' is: ' + str(time_lnlike_end - time_lnlike_start) + str(' seconds'))\n logging.info(' ')\n\n if not return_Image:\n return res, psf_position\n else:\n # if return_Image==True return: 0. likelihood, 1. model image, 2.\n # parameters, 3. [0. chi**2, 1. chi**2_max, 2. 
chi**2/dof, 3.\n # chi**2_max/dof]\n if not return_intermediate_images:\n return res, modelImg, allparameters,\\\n [chi_2_almost, chi_2_almost_max, chi_2_almost_dof, chi_2_almost_max_dof], psf_position\n if return_intermediate_images:\n return res, modelImg, allparameters, ilum, wf_grid_rot,\\\n [chi_2_almost, chi_2_almost_max, chi_2_almost_dof, chi_2_almost_max_dof], psf_position\n\n def create_x(self, zparameters, globalparameters):\n \"\"\"\n Given the zparameters and globalparameters separtly, this code moves them in a single array\n\n @param zparameters Zernike coefficents\n @param globalparameters other parameters describing the system\n \"\"\"\n x = np.zeros((len(zparameters) + len(globalparameters)))\n for i in range(len(zparameters)):\n x[i] = zparameters[i]\n\n for i in range(len(globalparameters)):\n x[int(len(zparameters) / 1) + i] = globalparameters[i]\n\n return x\n\n def __call__(self, allparameters, return_Image=False, return_intermediate_images=False,\n use_only_chi=False, multi_background_factor=3):\n return self.lnlike_Neven(allparameters, return_Image=return_Image,\n return_intermediate_images=return_intermediate_images,\n use_only_chi=use_only_chi,\n multi_background_factor=multi_background_factor)\n\n\nclass LNP_PFS(object):\n def __init__(self, image=None, image_var=None):\n self.image = image\n self.image_var = image_var\n\n def __call__(self, image=None, image_var=None):\n return 0.0\n\n\nclass PFSLikelihoodModule(object):\n \"\"\"\n PFSLikelihoodModule class for calculating a likelihood for cosmoHammer.ParticleSwarmOptimizer\n \"\"\"\n\n def __init__(self, model, explicit_wavefront=None):\n \"\"\"\n\n Constructor of the PFSLikelihoodModule\n \"\"\"\n self.model = model\n self.explicit_wavefront = explicit_wavefront\n\n def computeLikelihood(self, ctx):\n \"\"\"\n Computes the likelihood using information from the context\n \"\"\"\n # Get information from the context. This can be results from a core\n # module or the parameters coming from the sampler\n params = ctx.getParams()[0]\n return_Images_value = ctx.getParams()[1]\n\n logging.info('params' + str(params))\n\n # Calculate a likelihood up to normalization\n lnprob = self.model(params, return_Images=return_Images_value)\n\n # logging.info('current_process is: '+str(current_process())+str(lnprob))\n # logging.info(params)\n # logging.info('within computeLikelihood: parameters-hash '+\\\n # str(hash(str(params.data)))+'/threading: '+str(threading.get_ident()))\n\n # sys.stdout.flush()\n # Return the likelihood\n return lnprob\n\n def setup(self):\n \"\"\"\n Sets up the likelihood module.\n Tasks that need to be executed once per run\n \"\"\"\n # e.g. 
load data from files\n\n logging.info(\"PFSLikelihoodModule setup done\")\n\n\nclass PsfPosition(object):\n \"\"\"\n Class that deals with positioning the PSF model in respect to the data\n\n Function find_single_realization_min_cut enables the fit to the data\n \"\"\"\n\n def __init__(self, image, oversampling, size_natural_resolution, simulation_00=False,\n verbosity=0, save=None, PSF_DIRECTORY=None):\n \"\"\"\n Parameters\n -----------------\n image: `np.array`, (N, N)\n oversampled model image\n oversampling: `int`\n by how much is the the oversampled image oversampled\n simulation_00: `bool`\n if True, put optical center of the model image in\n the center of the final image\n verbosity: `int`\n how verbose the procedure is (1 for full verbosity)\n save: `int`\n save intermediate images on hard drive (1 for save)\n PSF_DIRECTORY: `str`\n\n \"\"\"\n self.image = image\n self.oversampling = oversampling\n self.size_natural_resolution = size_natural_resolution\n self.simulation_00 = simulation_00\n self.verbosity = verbosity\n if save is None:\n save = 0\n self.save = save\n self.PSF_DIRECTORY = PSF_DIRECTORY\n\n if self.PSF_DIRECTORY is not None:\n self.TESTING_FOLDER = self.PSF_DIRECTORY + 'Testing/'\n self.TESTING_PUPIL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Pupil_Images/'\n self.TESTING_WAVEFRONT_IMAGES_FOLDER = self.TESTING_FOLDER + 'Wavefront_Images/'\n self.TESTING_FINAL_IMAGES_FOLDER = self.TESTING_FOLDER + 'Final_Images/'\n\n @staticmethod\n def cut_Centroid_of_natural_resolution_image(image, size_natural_resolution, oversampling, dx, dy):\n \"\"\"Cut the central part from a larger oversampled image\n\n @param image input image\n @param size_natural_resolution size of new image in natural units\n @param oversampling oversampling\n\n @returns central part of the input image\n \"\"\"\n positions_from_where_to_start_cut = [int(len(image) / 2 - size_natural_resolution / 2\n - dx * oversampling + 1),\n int(len(image) / 2 - size_natural_resolution / 2\n - dy * oversampling + 1)]\n\n res = image[positions_from_where_to_start_cut[1]:positions_from_where_to_start_cut[1]\n + int(size_natural_resolution),\n positions_from_where_to_start_cut[0]:positions_from_where_to_start_cut[0]\n + int(size_natural_resolution)]\n return res\n\n def find_single_realization_min_cut(\n self,\n input_image,\n oversampling,\n size_natural_resolution,\n sci_image,\n var_image,\n mask_image,\n v_flux,\n simulation_00=False,\n double_sources=None,\n double_sources_positions_ratios=[0, 0],\n verbosity=0,\n explicit_psf_position=None,\n use_only_chi=False,\n use_center_of_flux=False):\n \"\"\"Move the image to find best position to downsample\n the oversampled image\n Parameters\n -----------------\n image: `np.array`, (N, N)\n model image to be analyzed\n (in our case this will be image of the\n optical psf convolved with fiber)\n oversampling: `int`\n oversampling\n size_natural_resolution: `int`\n size of final image (in the ``natural'' units, i.e., physical pixels\n on the detector)\n sci_image_0: `np.array`, (N, N)\n science image\n var_image_0: `np.array`, (N, N)\n variance image\n v_flux: `float`\n flux normalization\n simulation_00: `bool`\n if True,do not move the center, for making fair comparisons between\n models - optical center in places in the center of the image\n if use_center_of_flux==True the behaviour changes\n and the result is the image with center of flux\n in the center of the image\n double_sources: `bool`\n if True, fit for two sources seen in the data\n double_sources_positions_ratios: 
`np.array`, (2,)\n 2 values describing init guess for the relation between\n secondary and primary souces (offset, ratio)\n verbosity: `int`\n verbosity of the algorithm (1 for full verbosity)\n explicit_psf_position: `np.array`, (2,)\n x and y offset\n use_only_chi: `bool`\n quality of the centering is measured using chi, not chi**2\n use_center_of_flux: `bool`\n fit so that the center of flux of the model and\n the science image is as similar as possible\n Returns\n ----------\n model_image: `np.array`, (2,)\n returns model image in the size of the science image and\n centered to the science image\n (unless simulation_00=True or\n explicit_psf_position has been passed)\n Notes\n ----------\n Called by create_optPSF_natural in ZernikeFitterPFS\n Calls function create_complete_realization\n (many times in order to fit the best solution)\n \"\"\"\n self.sci_image = sci_image\n self.var_image = var_image\n self.mask_image = mask_image\n self.v_flux = v_flux\n\n # if you are just asking for simulated image at (0,0) there is no possibility to create double sources\n if simulation_00 == 1:\n double_sources = None\n\n if double_sources is None or double_sources is False:\n double_sources_positions_ratios = [0, 0]\n\n shape_of_input_img = input_image.shape[0]\n shape_of_sci_image = sci_image.shape[0]\n\n self.shape_of_input_img = shape_of_input_img\n self.shape_of_sci_image = shape_of_sci_image\n\n if verbosity == 1:\n logging.info('Parameter use_only_chi in Psf_postion is set to: ' + str(use_only_chi))\n logging.info('Parameter use_center_of_flux in Psf_postion is set to: ' + str(use_center_of_flux))\n logging.info('Parameter simulation_00 in Psf_postion is set to: ' + str(simulation_00))\n\n # depending on if there is a second source in the image split here\n # double_sources should always be None when when creating centered images (simulation_00 = True)\n if double_sources is None or bool(double_sources) is False:\n # if simulation_00 AND using optical center just run the realization that is set at 0,0\n if simulation_00 == 1 and use_center_of_flux is False:\n if verbosity == 1:\n logging.info('simulation_00 is set to 1 and use_center_of_flux==False -\\\n I am just returning the image at (0,0) coordinates ')\n\n # return the solution with x and y is zero, i.e., with optical center in\n # the center of the image\n mean_res, single_realization_primary_renormalized, single_realization_secondary_renormalized,\\\n complete_realization_renormalized \\\n = self.create_complete_realization([0, 0], return_full_result=True,\n use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_flux,\n simulation_00=simulation_00)\n\n # if you are fitting an actual image go through the full process\n else:\n # if you did not pass explict position search for the best position\n if explicit_psf_position is None:\n # if creating the model so that the model is centered so\n # that center of light of the model matches the center of the light\n # of the scientific image, manually change values for centroid_of_sci_image here\n if simulation_00 == 1 and use_center_of_flux:\n if self.verbosity == 1:\n logging.info('creating simulated image, center of light in center of the image')\n shape_of_sci_image = 21\n centroid_of_sci_image = [10.5, 10.5]\n else:\n # create one complete realization with default parameters - estimate\n # centorids and use that knowledge to put fitting limits in the next step\n centroid_of_sci_image = find_centroid_of_flux(sci_image)\n\n time_1 = time.time()\n initial_complete_realization = 
self.create_complete_realization(\n [0, 0, double_sources_positions_ratios[0] * self.oversampling,\n double_sources_positions_ratios[1]],\n return_full_result=True,\n use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_flux,\n simulation_00=simulation_00)[-1]\n time_2 = time.time()\n if self.verbosity == 1:\n logging.info('time_2-time_1 for initial_complete_realization: '\n + str(time_2 - time_1))\n\n # center of the light for the first realization, set at optical center\n centroid_of_initial_complete_realization = find_centroid_of_flux(\n initial_complete_realization)\n\n # determine offset between the initial guess and the data\n offset_initial_and_sci = - \\\n ((np.array(find_centroid_of_flux(initial_complete_realization))\n - np.array(find_centroid_of_flux(sci_image))))\n\n if verbosity == 1:\n logging.info('centroid_of_initial_complete_realization '\n + str(find_centroid_of_flux(initial_complete_realization)))\n logging.info('centroid_of_sci_image '+str(find_centroid_of_flux(sci_image)))\n logging.info('offset_initial_and_sci: ' + str(offset_initial_and_sci))\n logging.info('[x_primary, y_primary, y_secondary,ratio_secondary] / chi2 output')\n if self.save == 1:\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'initial_complete_realization', initial_complete_realization)\n\n # search for the best center using scipy ``shgo'' algorithm\n # set the limits for the fitting procedure\n y_2sources_limits = [\n (offset_initial_and_sci[1] - 2) * self.oversampling,\n (offset_initial_and_sci[1] + 2) * self.oversampling]\n x_2sources_limits = [\n (offset_initial_and_sci[0] - 1) * self.oversampling,\n (offset_initial_and_sci[0] + 1) * self.oversampling]\n # search for best positioning\n\n # if use_center_of_flux==True, we use more direct approach to get to the center\n\n if use_center_of_flux:\n for i in range(5):\n if verbosity == 1:\n logging.info(\"###\")\n\n if i == 0:\n\n x_i, y_i = offset_initial_and_sci * oversampling\n\n x_offset, y_offset = 0, 0\n x_offset = x_offset + x_i\n y_offset = y_offset + y_i\n else:\n x_offset = x_offset + x_i\n y_offset = y_offset + y_i\n # complete_realization=self.create_complete_realization(x=[x_offset,y_offset,0,0,],\\\n # return_full_result=True,use_only_chi=True,use_center_of_light=True,simulation_00=False)[-1]\n complete_realization = self.create_complete_realization(\n x=[x_offset, y_offset, 0, 0, ], return_full_result=True, use_only_chi=True,\n use_center_of_light=True, simulation_00=simulation_00)[-1]\n offset_initial_and_sci = -((np.array(find_centroid_of_flux(complete_realization))\n - np.array(find_centroid_of_flux(sci_image))))\n if verbosity == 1:\n logging.info('offset_initial_and_sci in step '\n + str(i) + ' ' + str(offset_initial_and_sci))\n logging.info(\"###\")\n x_i, y_i = offset_initial_and_sci * oversampling\n\n primary_position_and_ratio_x = [x_offset, y_offset]\n # if use_center_of_flux=False, we have to optimize to find the best solution\n else:\n # implement try syntax for secondary too\n try:\n # logging.info('simulation_00 here is: '+str(simulation_00))\n # logging.info('(False, use_only_chi,use_center_of_flux)' +\n # str((False, use_only_chi, use_center_of_flux)))\n # logging.info('x_2sources_limits' + str(x_2sources_limits))\n # logging.info('y_2sources_limits' + str(y_2sources_limits))\n primary_position_and_ratio_shgo = scipy.optimize.shgo(\n self.create_complete_realization,\n args=(\n False,\n use_only_chi,\n use_center_of_flux,\n simulation_00),\n bounds=[\n (x_2sources_limits[0],\n x_2sources_limits[1]),\n 
(y_2sources_limits[0],\n y_2sources_limits[1])],\n n=10,\n sampling_method='sobol',\n options={\n 'ftol': 1e-3,\n 'maxev': 10})\n\n # primary_position_and_ratio=primary_position_and_ratio_shgo\n primary_position_and_ratio = scipy.optimize.minimize(\n self.create_complete_realization,\n args=(\n False,\n use_only_chi,\n use_center_of_flux,\n simulation_00),\n x0=primary_position_and_ratio_shgo.x,\n method='Nelder-Mead',\n options={\n 'xatol': 0.00001,\n 'fatol': 0.00001})\n\n primary_position_and_ratio_x = primary_position_and_ratio.x\n except BaseException as e:\n logging.info(e)\n logging.info('search for primary position failed')\n primary_position_and_ratio_x = [0, 0]\n\n # return the best result, based on the result of the conducted search\n mean_res, single_realization_primary_renormalized,\\\n single_realization_secondary_renormalized, complete_realization_renormalized \\\n = self.create_complete_realization(primary_position_and_ratio_x,\n return_full_result=True,\n use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_flux,\n simulation_00=simulation_00)\n\n if self.save == 1:\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_realization_primary_renormalized',\n single_realization_primary_renormalized)\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_realization_secondary_renormalized',\n single_realization_secondary_renormalized)\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER\n + 'complete_realization_renormalized',\n complete_realization_renormalized)\n\n if self.verbosity == 1:\n if simulation_00 != 1:\n logging.info('We are fitting for only one source')\n logging.info('One source fitting result is ' + str(primary_position_and_ratio_x))\n logging.info('type(complete_realization_renormalized)'\n + str(type(complete_realization_renormalized[0][0])))\n\n centroid_of_complete_realization_renormalized = find_centroid_of_flux(\n complete_realization_renormalized)\n\n # determine offset between the initial guess and the data\n offset_final_and_sci = - \\\n (np.array(centroid_of_complete_realization_renormalized)\n - np.array(centroid_of_sci_image))\n\n logging.info('offset_final_and_sci: ' + str(offset_final_and_sci))\n\n return complete_realization_renormalized, primary_position_and_ratio_x\n\n # if you did pass explicit_psf_position for the solution evalute the code here\n else:\n mean_res, single_realization_primary_renormalized,\\\n single_realization_secondary_renormalized, complete_realization_renormalized\\\n = self.create_complete_realization(explicit_psf_position,\n return_full_result=True,\n use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_flux)\n\n if self.save == 1:\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER + 'single_realization_primary_renormalized',\n single_realization_primary_renormalized)\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER + 'single_realization_secondary_renormalized',\n single_realization_secondary_renormalized)\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER + 'complete_realization_renormalized',\n complete_realization_renormalized)\n\n if self.verbosity == 1:\n if simulation_00 != 1:\n logging.info('We are passing value for only one source')\n logging.info('One source fitting result is ' + str(explicit_psf_position))\n logging.info('type(complete_realization_renormalized)'\n + str(type(complete_realization_renormalized[0][0])))\n\n return complete_realization_renormalized, explicit_psf_position\n\n else:\n # TODO: need to make possible that you can pass your own values for double source!!!!\n # create one complete 
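# A minimal sketch, assuming a toy objective, of the coarse-to-fine search pattern
# used above: a global scipy.optimize.shgo pass (Sobol sampling) over the offset
# bounds, followed by a local Nelder-Mead refinement started from the shgo solution.
# The quadratic toy_objective is only a stand-in for create_complete_realization
# (the pipeline itself uses n=10 and its own bounds/options).
import scipy.optimize

def toy_objective(offsets):
    x, y = offsets
    return (x - 0.3) ** 2 + (y + 1.2) ** 2

coarse = scipy.optimize.shgo(toy_objective,
                             bounds=[(-5.0, 5.0), (-5.0, 5.0)],
                             n=16, sampling_method='sobol')   # small, arbitrary sampling budget
fine = scipy.optimize.minimize(toy_objective,
                               x0=coarse.x,
                               method='Nelder-Mead',
                               options={'xatol': 1e-5, 'fatol': 1e-5})
# fine.x is the refined (x, y) offset, playing the role of primary_position_and_ratio.x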
realization with default parameters - estimate\n # centroids and use that knowledge to put fitting limits in the next step\n centroid_of_sci_image = find_centroid_of_flux(sci_image)\n initial_complete_realization = self.create_complete_realization([0,\n 0,\n double_sources_positions_ratios[0] # noqa: E501\n * self.oversampling,\n double_sources_positions_ratios[1]], # noqa: E501\n return_full_result=True,\n use_only_chi=use_only_chi,\n use_center_of_light= # noqa: E251\n use_center_of_flux,\n simulation_00=simulation_00)[-1]\n centroid_of_initial_complete_realization = find_centroid_of_flux(initial_complete_realization)\n\n # determine offset between the initial guess and the data\n offset_initial_and_sci = - \\\n (np.array(centroid_of_initial_complete_realization) - np.array(centroid_of_sci_image))\n\n if verbosity == 1:\n\n logging.info('Evaulating double source psf positioning loop')\n logging.info('offset_initial_and_sci: ' + str(offset_initial_and_sci))\n logging.info('[x_primary, y_primary, y_secondary,ratio_secondary] / chi2 output')\n\n if self.save == 1:\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'sci_image', sci_image)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'initial_complete_realization',\n initial_complete_realization)\n\n # implement that it does not search if second object far away while in focus\n # focus size is 20\n if shape_of_sci_image == 20 and np.abs(self.double_sources_positions_ratios[0]) > 15:\n if verbosity == 1:\n logging.info('fitting second source, but assuming that second source is too far')\n\n # if the second spot is more than 15 pixels away\n # copying code from the non-double source part\n # search for the best center using scipy ``shgo'' algorithm\n # set the limits for the fitting procedure\n y_2sources_limits = [\n (offset_initial_and_sci[1] - 2) * self.oversampling,\n (offset_initial_and_sci[1] + 2) * self.oversampling]\n x_2sources_limits = [\n (offset_initial_and_sci[0] - 1) * self.oversampling,\n (offset_initial_and_sci[0] + 1) * self.oversampling]\n # search for best positioning\n # implement try for secondary too\n try:\n # logging.info('(False,use_only_chi,use_center_of_flux)'+str((False,use_only_chi,use_center_of_flux)))\n primary_position_and_ratio_shgo = scipy.optimize.shgo(\n self.create_complete_realization,\n args=(\n False,\n use_only_chi,\n use_center_of_flux,\n simulation_00),\n bounds=[\n (x_2sources_limits[0],\n x_2sources_limits[1]),\n (y_2sources_limits[0],\n y_2sources_limits[1])],\n n=10,\n sampling_method='sobol',\n options={\n 'ftol': 1e-3,\n 'maxev': 10})\n\n if verbosity == 1:\n logging.info('starting finer positioning')\n\n # primary_position_and_ratio=primary_position_and_ratio_shgo\n primary_position_and_ratio = scipy.optimize.minimize(\n self.create_complete_realization,\n args=(\n False,\n use_only_chi,\n use_center_of_flux,\n simulation_00),\n x0=primary_position_and_ratio_shgo.x,\n method='Nelder-Mead',\n options={\n 'xatol': 0.00001,\n 'fatol': 0.00001})\n\n primary_position_and_ratio_x = primary_position_and_ratio.x\n except BaseException:\n logging.info('search for primary position failed')\n primary_position_and_ratio_x = [0, 0]\n\n primary_secondary_position_and_ratio_x = np.array([0., 0., 0., 0.])\n primary_secondary_position_and_ratio_x[0] = primary_position_and_ratio_x[0]\n primary_secondary_position_and_ratio_x[1] = primary_position_and_ratio_x[1]\n\n else:\n\n # set the limits for the fitting procedure\n y_2sources_limits = [\n (offset_initial_and_sci[1] - 2) * self.oversampling,\n 
(offset_initial_and_sci[1] + 2) * self.oversampling]\n x_2sources_limits = [\n (offset_initial_and_sci[0] - 1) * self.oversampling,\n (offset_initial_and_sci[0] + 1) * self.oversampling]\n y_2sources_limits_second_source = [\n (self.double_sources_positions_ratios[0] - 2) * oversampling,\n (self.double_sources_positions_ratios[0] + 2) * oversampling]\n\n # search for best result\n # x position, y_position_1st, y_position_2nd, ratio\n\n primary_secondary_position_and_ratio = scipy.optimize.shgo(\n self.create_complete_realization,\n args=(\n False,\n use_only_chi,\n use_center_of_flux,\n simulation_00),\n bounds=[\n (x_2sources_limits[0],\n x_2sources_limits[1]),\n (y_2sources_limits[0],\n y_2sources_limits[1]),\n (y_2sources_limits_second_source[0],\n y_2sources_limits_second_source[1]),\n (self.double_sources_positions_ratios[1] / 2,\n 2 * self.double_sources_positions_ratios[1])],\n n=10,\n sampling_method='sobol',\n options={\n 'ftol': 1e-3,\n 'maxev': 10})\n\n primary_secondary_position_and_ratio_x = primary_secondary_position_and_ratio.x\n\n # primary_secondary_position_and_ratio=scipy.optimize.shgo(self.create_complete_realization,(False,use_only_chi,use_center_of_flux),bounds=\\\n # [(x_2sources_limits[0],x_2sources_limits[1]),(y_2sources_limits[0],y_2sources_limits[1]),\\\n # (y_2sources_limits_second_source[0],y_2sources_limits_second_source[1]),\\\n # (self.double_sources_positions_ratios[1]/2,2*self.double_sources_positions_ratios[1])],n=10,sampling_method='sobol',\\\n # options={'maxev':10,'ftol':1e-3})\n\n # return best result\n # introduce best_result=True\n mean_res, single_realization_primary_renormalized,\n single_realization_secondary_renormalized, complete_realization_renormalized \\\n = self.create_complete_realization(primary_secondary_position_and_ratio_x,\n return_full_result=True, use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_flux,\n simulation_00=simulation_00)\n\n if self.save == 1:\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER + 'single_realization_primary_renormalized',\n single_realization_primary_renormalized)\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER + 'single_realization_secondary_renormalized',\n single_realization_secondary_renormalized)\n np.save(\n self.TESTING_FINAL_IMAGES_FOLDER + 'complete_realization_renormalized',\n complete_realization_renormalized)\n\n if self.verbosity == 1:\n logging.info('We are fitting for two sources')\n logging.info('Two source fitting result is ' + str(primary_secondary_position_and_ratio_x))\n logging.info('type(complete_realization_renormalized)'\n + str(type(complete_realization_renormalized[0][0])))\n\n return complete_realization_renormalized, primary_secondary_position_and_ratio_x\n\n def create_complete_realization(\n self,\n x,\n return_full_result=False,\n use_only_chi=False,\n use_center_of_light=False,\n simulation_00=False):\n \"\"\"Create one complete downsampled realization of the image,\n from the full oversampled image\n Parameters\n ----------\n x: `np.array`, (4,)\n array contaning x_primary, y_primary,\n offset in y to secondary source, \\\n ratio in flux from secondary to primary;\n the units are oversampled pixels\n return_full_result: `bool`\n if True, returns the images itself (not just chi**2)\n use_only_chi: `bool`\n if True, minimize chi; if False, minimize chi^2\n use_center_of_light: `bool`\n if True, minimize distance to center of light, in focus\n simulation_00: `bool`\n if True,do not move the center, for making fair comparisons between\n models - optical center in places 
in the center of the image\n if use_center_of_light==True the behaviour changes\n and the result is the image with center of flux\n in the center of the image\n Returns\n ----------\n chi_2_almost_multi_values: `float`\n returns the measure of quality\n (chi**2, chi, or distance of center\n of light between science and model image)\n distance of center of light between science\n and model image is given in units of pixels\n single_primary_realization_renormalized: `np.array`, (N, N)\n image containg the model corresponding\n to the primary source in the science image\n single_secondary_realization_renormalized: `np.array`, (N, N)\n image containg the model corresponding\n to the secondary source in the science image\n complete_realization_renormalized: `np.array`, (N, N)\n image combining the primary\n and secondary source (if secondary source is needed)\n Notes\n ----------\n TODO: implement that you are able to call outside find_single_realization_min_cut\n Called by find_single_realization_min_cut\n Calls create_chi_2_almost_Psf_position\n \"\"\"\n # oversampled input image\n image = self.image\n\n sci_image = self.sci_image\n var_image = self.var_image\n mask_image = self.mask_image\n shape_of_sci_image = self.size_natural_resolution\n\n oversampling = self.oversampling\n v_flux = self.v_flux\n\n # central position of the create oversampled image\n center_position = int(np.floor(image.shape[0] / 2))\n # to be applied on x-axis\n primary_offset_axis_1 = x[0]\n # to be applied on y-axis\n primary_offset_axis_0 = x[1]\n if simulation_00 == 1:\n simulation_00 = True\n\n # if you are only fitting for primary image\n # add zero values for secondary image\n if len(x) == 2:\n ratio_secondary = 0\n else:\n ratio_secondary = x[3]\n\n if len(x) == 2:\n secondary_offset_axis_1 = 0\n secondary_offset_axis_0 = 0\n else:\n secondary_offset_axis_1 = primary_offset_axis_1\n secondary_offset_axis_0 = x[2] + primary_offset_axis_0\n\n shape_of_oversampled_image = int(shape_of_sci_image * oversampling / 2)\n\n # from https://github.com/Subaru-PFS/drp_stella/blob/\\\n # 6cceadfc8721fcb1c7eb1571cf4b9bc8472e983d/src/SpectralPsf.cc\n # // Binning by an odd factor requires the centroid at the center of a pixel.\n # // Binning by an even factor requires the centroid on the edge of a pixel.\n\n # the definitions used in primary image\n # we separate if the image shape is odd or even, but at the moment there is no difference\n if np.modf(shape_of_oversampled_image / 2)[0] == 0.0:\n # logging.info('shape is an even number')\n shift_x_mod = np.array(\n [-(np.round(primary_offset_axis_1) - primary_offset_axis_1),\n -np.round(primary_offset_axis_1)])\n shift_y_mod = np.array(\n [-(np.round(primary_offset_axis_0) - primary_offset_axis_0),\n -np.round(primary_offset_axis_0)])\n else:\n # logging.info('shape is an odd number')\n shift_x_mod = np.array(\n [-(np.round(primary_offset_axis_1) - primary_offset_axis_1),\n -np.round(primary_offset_axis_1)])\n shift_y_mod = np.array(\n [-(np.round(primary_offset_axis_0) - primary_offset_axis_0),\n -np.round(primary_offset_axis_0)])\n\n image_integer_offset = image[center_position\n + int(shift_y_mod[1]) - 1\n - shape_of_oversampled_image:center_position\n + int(shift_y_mod[1])\n + shape_of_oversampled_image + 1,\n center_position\n + int(shift_x_mod[1]) - 1\n - shape_of_oversampled_image: center_position\n + int(shift_x_mod[1])\n + shape_of_oversampled_image + 1]\n if simulation_00:\n image_integer_offset = image[center_position\n + int(shift_y_mod[1]) - 1\n - 
shape_of_oversampled_image:center_position\n + int(shift_y_mod[1])\n + shape_of_oversampled_image + 1 + 1,\n center_position\n + int(shift_x_mod[1]) - 1\n - shape_of_oversampled_image: center_position\n + int(shift_x_mod[1])\n + shape_of_oversampled_image + 1 + 1]\n logging.info('image_integer_offset shape: ' + str(image_integer_offset.shape))\n\n image_integer_offset_lsst = lsst.afw.image.image.ImageD(image_integer_offset.astype('float64'))\n\n oversampled_Image_LSST_apply_frac_offset = lsst.afw.math.offsetImage(\n image_integer_offset_lsst, shift_x_mod[0], shift_y_mod[0], algorithmName='lanczos5', buffer=5)\n\n single_primary_realization_oversampled = oversampled_Image_LSST_apply_frac_offset.array[1:-1, 1:-1]\n\n # logging.info('single_primary_realization_oversampled.shape[0]: '+\n # str(single_primary_realization_oversampled.shape[0]))\n # logging.info('shape_of_sci_image: '+str(shape_of_sci_image))\n # logging.info('oversampling: '+str(oversampling))\n\n assert single_primary_realization_oversampled.shape[0] == shape_of_sci_image * oversampling\n\n single_primary_realization = resize(\n single_primary_realization_oversampled, (shape_of_sci_image, shape_of_sci_image), ())\n\n # im1= galsim.Image(image, copy=True,scale=1)\n # time_2=time.time()\n # interpolated_image = galsim._InterpolatedImage(im1,\\\n # x_interpolant=galsim.Lanczos(5, True))\n # time_3=time.time()\n # time_3_1=time.time()\n # single_primary_realization_oversampled_1 =\n # interpolated_image.shift(primary_offset_axis_1,primary_offset_axis_0 )\n # time_3_2=time.time()\n # single_primary_realization_oversampled_2=single_primary_realization_oversampled_1.drawImage\\\n # (nx=shape_of_sci_image*oversampling, ny=shape_of_sci_image*oversampling, scale=1, method='no_pixel')\n # time_3_3=time.time()\n # single_primary_realization_oversampled_3=single_primary_realization_oversampled_2.array\n # time_4=time.time()\n # single_primary_realization = resize(single_primary_realization_oversampled_3,\\\n # (shape_of_sci_image,shape_of_sci_image),())\n # time_5=time.time()\n # if self.verbosity==1:\n # logging.info('time_2-time_1 for shift and resize '+str(time_2-time_1))\n # logging.info('time_3-time_2 for shift and resize '+str(time_3-time_2))\n # logging.info('time_3_1-time_3 for shift and resize '+str(time_3_1-time_3))\n # logging.info('time_3_2-time_3_1 for shift and resize '+str(time_3_2-time_3_1))\n # logging.info('time_3_3-time_3_2 for shift and resize '+str(time_3_3-time_3_2))\n # logging.info('time_4-time_3_3 for shift and resize '+str(time_4-time_3_3))\n # logging.info('time_4-time_3 for shift and resize '+str(time_4-time_3))\n # logging.info('time_5-time_4 for shift and resize '+str(time_5-time_4))\n # logging.info('time_5-time_1 for shift and resize '+str(time_5-time_1))\n\n ###################\n # skip this part if only doing primary\n # go through secondary loop if the flux ratio is not zero\n # (needs to be implemented - if secondary too far outside the image, do not go through secondary)\n if ratio_secondary != 0:\n\n # overloading the definitions used in primary image\n if np.modf(shape_of_oversampled_image / 2)[0] == 0.0:\n # logging.info('shape is an even number')\n\n shift_x_mod = np.array(\n [-(np.round(secondary_offset_axis_1) - secondary_offset_axis_1),\n -np.round(secondary_offset_axis_1)])\n shift_y_mod = np.array(\n [-(np.round(secondary_offset_axis_0) - secondary_offset_axis_0),\n -np.round(secondary_offset_axis_0)])\n\n else:\n # logging.info('shape is an odd number')\n shift_x_mod = np.array(\n 
[-(np.round(secondary_offset_axis_1) - secondary_offset_axis_1),\n -np.round(secondary_offset_axis_1)])\n shift_y_mod = np.array(\n [-(np.round(secondary_offset_axis_0) - secondary_offset_axis_0),\n -np.round(secondary_offset_axis_0)])\n\n image_integer_offset = image[center_position\n + int(shift_y_mod[1]) - 1\n - shape_of_oversampled_image:center_position\n + int(shift_y_mod[1])\n + shape_of_oversampled_image + 2,\n center_position\n + int(shift_x_mod[1]) - 1\n - shape_of_oversampled_image: center_position\n + int(shift_x_mod[1])\n + shape_of_oversampled_image + 2]\n\n image_integer_offset_lsst = lsst.afw.image.image.ImageD(image_integer_offset.astype('float64'))\n\n oversampled_Image_LSST_apply_frac_offset = lsst.afw.math.offsetImage(\n image_integer_offset_lsst, shift_y_mod[0], shift_x_mod[0], algorithmName='lanczos5', buffer=5)\n\n single_secondary_realization_oversampled =\\\n oversampled_Image_LSST_apply_frac_offset.array[1:-1, 1:-1]\n\n single_secondary_realization = resize(\n single_secondary_realization_oversampled, (shape_of_sci_image, shape_of_sci_image), ())\n\n inverted_mask = ~mask_image.astype(bool)\n\n ###################\n # create complete_realization which is just pimary if no secondary source\n # if there is secondary source, add two images together\n if ratio_secondary != 0:\n complete_realization = single_primary_realization + ratio_secondary * single_secondary_realization\n complete_realization_renormalized = complete_realization * \\\n (np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))\n else:\n\n complete_realization = single_primary_realization\n complete_realization_renormalized = complete_realization * \\\n (np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))\n\n ###################\n # find chi values and save the results\n # logging.info('checkpoint in create_complete_realization')\n if not return_full_result:\n # time_1 = time.time()\n chi_2_almost_multi_values = self.create_chi_2_almost_Psf_position(\n complete_realization_renormalized,\n sci_image,\n var_image,\n mask_image,\n use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_light,\n simulation_00=simulation_00)\n # time_2 = time.time()\n if self.verbosity == 1:\n logging.info(\n 'chi2 within shgo with use_only_chi ' + str(use_only_chi)\n + ' and use_center_of_light ' + str(use_center_of_light) + ' ' + str(x) + ' / '\n + str(chi_2_almost_multi_values))\n # logging.info('time_2-time_1 for create_chi_2_almost_Psf_position: '+str(time_2-time_1))\n return chi_2_almost_multi_values\n else:\n if ratio_secondary != 0:\n # logging.info('ratio_secondary 2nd loop: '+str(ratio_secondary))\n single_primary_realization_renormalized = single_primary_realization * \\\n (np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))\n single_secondary_realization_renormalized = ratio_secondary * single_secondary_realization * \\\n (np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))\n else:\n # logging.info('ratio_secondary 2nd loop 0: '+str(ratio_secondary))\n single_primary_realization_renormalized = single_primary_realization * \\\n (np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))\n single_secondary_realization_renormalized = np.zeros(\n single_primary_realization_renormalized.shape)\n\n if self.save == 1:\n np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'image', image)\n if ratio_secondary != 0:\n 
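# A minimal sketch, on hypothetical toy arrays, of the renormalization step above:
# the (primary + ratio * secondary) model is rescaled so that its flux over the
# unmasked pixels equals v_flux times the flux of the science image over the same
# pixels. All arrays here are small stand-ins, not pipeline data.
import numpy as np

rng = np.random.default_rng(0)
sci = rng.uniform(0.0, 1.0, (20, 20))            # hypothetical science image
primary = rng.uniform(0.0, 1.0, (20, 20))        # hypothetical primary model
secondary = rng.uniform(0.0, 1.0, (20, 20))      # hypothetical secondary model
mask = np.zeros((20, 20), dtype=bool)            # True would mark bad pixels
inverted_mask = ~mask
ratio_secondary, v_flux = 0.1, 1.0

model = primary + ratio_secondary * secondary
scale = np.sum(sci[inverted_mask]) * v_flux / np.sum(model[inverted_mask])
model_renormalized = model * scale
# np.sum(model_renormalized[inverted_mask]) now equals v_flux * np.sum(sci[inverted_mask])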
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'image_full_for_secondary', image)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_secondary_realization', single_secondary_realization)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_primary_realization', single_primary_realization)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_primary_realization_renormalized_within_create_complete_realization',\n single_primary_realization_renormalized)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_secondary_realization_renormalized_within_create_complete_realization',\n single_secondary_realization_renormalized)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'complete_realization_renormalized_within_create_complete_realization',\n complete_realization_renormalized)\n\n # should I modify this function to remove distance from physcial center of\n # mass when using that option\n chi_2_almost_multi_values = self.create_chi_2_almost_Psf_position(\n complete_realization_renormalized,\n sci_image,\n var_image,\n mask_image,\n use_only_chi=use_only_chi,\n use_center_of_light=use_center_of_light,\n simulation_00=simulation_00)\n\n # if best, save oversampled image\n if simulation_00:\n if self.verbosity == 1:\n logging.info('saving oversampled simulation_00 image')\n # logging.info('I have to implement that again')\n logging.info('saving at ' + self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_primary_realization_oversampled')\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'single_primary_realization_oversampled_to_save',\n single_primary_realization_oversampled)\n np.save(self.TESTING_FINAL_IMAGES_FOLDER\n + 'complete_realization_renormalized_to_save',\n single_primary_realization_oversampled)\n\n return chi_2_almost_multi_values,\\\n single_primary_realization_renormalized, single_secondary_realization_renormalized,\\\n complete_realization_renormalized\n\n def create_chi_2_almost_Psf_position(self, modelImg, sci_image, var_image, mask_image,\n use_only_chi=False, use_center_of_light=False, simulation_00=False):\n \"\"\"Returns quality of the model's fit compared to the science image\n Parameters\n ----------\n modelImg: `np.array`, (N, N)\n model image\n sci_image: `np.array`, (N, N)\n science image\n var_image: `np.array`, (N, N)\n variance image\n mask_image: `np.array`, (N, N)\n mask image\n use_only_chi: `bool`\n if True, minimize chi; if False, minimize chi^2\n use_center_of_light: `bool`\n if True, minimizes distance of center of light between science\n and model image\n simulation_00: `bool`\n if True,do not move the center, for making fair comparisons between\n models - optical center in places in the center of the image\n if use_center_of_light==True the behaviour changes\n and the result is the image with center of flux\n in the center of the image\n Returns\n ----------\n measure_of_quality: `float`\n returns the measure of quality\n (chi**2, chi, or distance of center\n of light between science and model image)\n distance of center of light between science\n and model image is given in units of pixels\n Notes\n ----------\n Called by create_complete_realization\n \"\"\"\n inverted_mask = ~mask_image.astype(bool)\n\n var_image_masked = var_image * inverted_mask\n sci_image_masked = sci_image * inverted_mask\n modelImg_masked = modelImg * inverted_mask\n\n # if you are minimizing chi or chi**2\n if not use_center_of_light:\n if not use_only_chi:\n chi2 = (sci_image_masked - modelImg_masked)**2 / var_image_masked\n chi2nontnan = chi2[~np.isnan(chi2)]\n if use_only_chi:\n chi2 = 
np.abs((sci_image_masked - modelImg_masked))**1 / np.sqrt(var_image_masked)\n chi2nontnan = chi2[~np.isnan(chi2)]\n return np.mean(chi2nontnan)\n else:\n if simulation_00 is False or simulation_00 is None:\n if self.verbosity == 1:\n logging.info('sim00=False and center of light =true')\n\n distance_of_flux_center = np.sqrt(\n np.sum((np.array(\n find_centroid_of_flux(modelImg_masked))\n - np.array(\n find_centroid_of_flux(sci_image_masked)))**2))\n else:\n # if you pass both simulation_00 paramter and use_center_of_light=True,\n # center of light will be centered\n # in the downsampled image\n if self.verbosity == 1:\n logging.info('sim00=True and center of light =true')\n\n distance_of_flux_center = np.sqrt(\n np.sum((np.array(find_centroid_of_flux(modelImg_masked))\n - np.array(np.array(np.ones((21, 21)).shape)\n / 2 - 0.5))**2))\n # logging.info('distance_of_flux_center: '+str(distance_of_flux_center))\n return distance_of_flux_center\n\n def fill_crop(self, img, pos, crop):\n '''\n Fills `crop` with values from `img` at `pos`,\n while accounting for the crop being off the edge of `img`.\n *Note:* negative values in `pos` are interpreted as-is, not as \"from the end\".\n Taken from https://stackoverflow.com/questions/41153803/zero-padding-slice-past-end-of-array-in-numpy # noqa:E501\n '''\n img_shape, pos, crop_shape = np.array(\n img.shape, dtype=int), np.array(\n pos, dtype=int), np.array(\n crop.shape, dtype=int)\n end = pos + crop_shape\n # Calculate crop slice positions\n crop_low = np.clip(0 - pos, a_min=0, a_max=crop_shape)\n crop_high = crop_shape - np.clip(end - img_shape, a_min=0, a_max=crop_shape)\n crop_slices = (slice(low, high) for low, high in zip(crop_low, crop_high))\n # Calculate img slice positions\n pos = np.clip(pos, a_min=0, a_max=img_shape)\n end = np.clip(end, a_min=0, a_max=img_shape)\n img_slices = (slice(low, high) for low, high in zip(pos, end))\n try:\n crop[tuple(crop_slices)] = img[tuple(img_slices)]\n except TypeError:\n logging.info('TypeError in fill_crop function')\n pass\n\n\nclass Zernike_estimation_preparation(object):\n \"\"\"\n Class that creates the inputs for the Zernike parameter estimation\n\n Parameters\n ----------\n list_of_obs : `list`\n list of observations\n list_of_spots :\n list of spots to be analyzed\n dataset : int\n\n list_of_arc : `list`\n list of arcs to be analyzed\n\n ...\n\n\n Returns\n ----------\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d : `np.array`\n array of the initial parametrization proposals\n input for ?\n array_of_sci_images_multi_spot : `np.array`\n array of science images\n array_of_var_images_multi_spot : `np.array`\n array of var images\n array_of_mask_images_multi_spot : `np.array`\n array of mask images\n\n \"\"\"\n\n def __init__(self, list_of_labelInput, list_of_spots, dataset,\n list_of_arc, eps, nsteps, analysis_type='defocus',\n analysis_type_fiber=None):\n\n self.list_of_labelInput = list_of_labelInput\n self.list_of_spots = list_of_spots\n self.dataset = dataset\n self.list_of_arc = list_of_arc\n self.eps = eps\n self.nsteps = nsteps\n self.analysis_type = analysis_type\n self.analysis_type_fiber = analysis_type_fiber\n\n # TODO : make this as input or deduce from the data\n self.multi_var = True\n\n logging.info('Dataset analyzed is: ' + str(dataset))\n if dataset == 0 or dataset == 1:\n logging.info('ehm.... 
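# A minimal sketch, on hypothetical toy arrays, of the quality measures computed in
# create_chi_2_almost_Psf_position above: mean chi**2, mean chi, or (when centering
# on the flux centroid) the distance between model and science centroids.
# find_centroid_of_flux is the module's own helper and is not redefined here.
import numpy as np

rng = np.random.default_rng(1)
sci = rng.uniform(1.0, 2.0, (20, 20))            # hypothetical science image
model = sci + rng.normal(0.0, 0.1, (20, 20))     # hypothetical model image
var = np.full((20, 20), 0.01)                    # hypothetical variance image
mask = np.zeros((20, 20), dtype=bool)
ok = ~mask

chi2_map = (sci[ok] - model[ok]) ** 2 / var[ok]
mean_chi2 = np.mean(chi2_map[~np.isnan(chi2_map)])       # use_only_chi=False
chi_map = np.abs(sci[ok] - model[ok]) / np.sqrt(var[ok])
mean_chi = np.mean(chi_map[~np.isnan(chi_map)])          # use_only_chi=True
# with use_center_of_light=True the method instead returns the Euclidean distance
# between find_centroid_of_flux(model) and find_centroid_of_flux(sci), in pixels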
old data, not analyzed')\n\n # folder contaning the data from February 2019\n # dataset 1\n # DATA_FOLDER='/tigress/ncaplar/Data/Feb5Data/'\n\n # folder containing the data taken with F/2.8 stop in April and May 2019\n # dataset 2\n if dataset == 2:\n DATA_FOLDER = '/tigress/ncaplar/ReducedData/Data_May_28/'\n\n # folder containing the data taken with F/2.8 stop in April and May 2019\n # dataset 3\n if dataset == 3:\n DATA_FOLDER = '/tigress/ncaplar/ReducedData/Data_Jun_25/'\n\n # folder containing the data taken with F/2.8 stop in July 2019\n # dataset 4 (defocu) and 5 (fine defocus)\n if dataset == 4 or dataset == 5:\n DATA_FOLDER = '/tigress/ncaplar/ReducedData/Data_Aug_14/'\n\n # folder contaning the data taken with F/2.8 stop in November 2020 on Subaru\n if dataset == 6:\n DATA_FOLDER = '/tigress/ncaplar/ReducedData/Data_Nov_20/'\n\n # folder contaning the data taken with F/2.8 stop in June 2021, at LAM, on SM2\n if dataset == 7:\n DATA_FOLDER = '/tigress/ncaplar/ReducedData/Data_May_21_2021/'\n\n # folder contaning the data taken with F/2.8 stop in June 2021, at Subaru\n # (21 fibers)\n if dataset == 8:\n if 'subaru' in socket.gethostname():\n DATA_FOLDER = '/work/ncaplar/ReducedData/Data_May_25_2021/'\n else:\n DATA_FOLDER = '/tigress/ncaplar/ReducedData/Data_May_25_2021/'\n\n STAMPS_FOLDER = DATA_FOLDER + 'Stamps_cleaned/'\n DATAFRAMES_FOLDER = DATA_FOLDER + 'Dataframes/'\n if 'subaru' in socket.gethostname():\n RESULT_FOLDER = '/work/ncaplar/Results/'\n else:\n RESULT_FOLDER = '/tigress/ncaplar/Results/'\n\n self.STAMPS_FOLDER = STAMPS_FOLDER\n self.DATAFRAMES_FOLDER = DATAFRAMES_FOLDER\n self.RESULT_FOLDER = RESULT_FOLDER\n\n if eps == 1:\n # particle count, c1 parameter (individual), c2 parameter (global)\n options = [390, 1.193, 1.193]\n if eps == 2:\n options = [790, 1.193, 1.193]\n nsteps = int(nsteps / 2)\n if eps == 3:\n options = [390, 1.593, 1.193]\n if eps == 4:\n options = [390, 0.993, 1.193]\n if eps == 5:\n options = [480, 2.793, 0.593]\n if eps == 6:\n options = [480, 2.793, 1.193]\n if eps == 7:\n options = [48, 2.793, 1.193]\n if eps == 8:\n options = [480, 2.793, 0.193]\n if eps == 9:\n options = [190, 1.193, 1.193]\n nsteps = int(2 * nsteps)\n if eps == 10:\n options = [390, 1.893, 2.893]\n\n particleCount = options[0]\n c1 = options[1]\n c2 = options[2]\n\n self.particleCount = particleCount\n self.options = options\n self.c1 = c1\n self.c2 = c2\n\n # names for paramters - names if we go up to z22\n columns22 = [\n 'z4',\n 'z5',\n 'z6',\n 'z7',\n 'z8',\n 'z9',\n 'z10',\n 'z11',\n 'z12',\n 'z13',\n 'z14',\n 'z15',\n 'z16',\n 'z17',\n 'z18',\n 'z19',\n 'z20',\n 'z21',\n 'z22',\n 'detFrac',\n 'strutFrac',\n 'dxFocal',\n 'dyFocal',\n 'slitFrac',\n 'slitFrac_dy',\n 'wide_0',\n 'wide_23',\n 'wide_43',\n 'misalign',\n 'x_fiber',\n 'y_fiber',\n 'effective_radius_illumination',\n 'frd_sigma',\n 'frd_lorentz_factor',\n 'det_vert',\n 'slitHolder_frac_dx',\n 'grating_lines',\n 'scattering_slope',\n 'scattering_amplitude',\n 'pixel_effect',\n 'fiber_r',\n 'flux']\n\n self.columns22 = columns22\n\n def return_auxiliary_info(self):\n \"\"\"\n\n\n Parameters\n ----------\n\n Return\n ----------\n\n\n \"\"\"\n\n return self.particleCount, self.c1, self.c2\n\n def create_list_of_obs_from_list_of_label(self):\n \"\"\"\n\n\n Parameters\n ----------\n\n Return\n ----------\n\n\n \"\"\"\n dataset = self.dataset\n list_of_labelInput = self.list_of_labelInput\n # arc = self.list_of_arc[0]\n\n logging.info('self.list_of_arc: '+str(self.list_of_arc))\n 
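# The chain of eps branches above can be read as a lookup table: each eps selects
# [particleCount, c1 (individual), c2 (global)] for the particle swarm, and
# eps 2 / eps 9 additionally halve / double nsteps. Values are copied from the
# branches above; the dict name is only illustrative.
EPS_TO_PSO_OPTIONS = {
    1: [390, 1.193, 1.193],
    2: [790, 1.193, 1.193],   # nsteps is halved for this case
    3: [390, 1.593, 1.193],
    4: [390, 0.993, 1.193],
    5: [480, 2.793, 0.593],
    6: [480, 2.793, 1.193],
    7: [48, 2.793, 1.193],
    8: [480, 2.793, 0.193],
    9: [190, 1.193, 1.193],   # nsteps is doubled for this case
    10: [390, 1.893, 2.893],
}
particleCount, c1, c2 = EPS_TO_PSO_OPTIONS[1]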
################################################\n # (3.) import obs_pos & connect\n ################################################\n\n # What are the observations that can be analyzed\n # This information is used to associate observation with their input labels\n # (see definition of `label` below)\n # This is so that the initial parameters guess is correct\n\n list_of_obs_possibilites = []\n for arc in self.list_of_arc:\n\n # dataset 0, December 2017 data - possibly deprecated\n \"\"\"\n if arc == 'HgAr':\n obs_possibilites = np.array([8552, 8555, 8558, 8561, 8564, 8567, 8570, 8573,\n 8603, 8600, 8606, 8609, 8612, 8615, 8618, 8621, 8624, 8627])\n elif arc == 'Ne':\n logging.info('Neon?????')\n obs_possibilites = np.array([8552, 8555, 8558, 8561, 8564, 8567, 8570, 8573,\n 8603, 8600, 8606, 8609, 8612, 8615, 8618, 8621, 8624, 8627])+90\n \"\"\"\n\n # F/3.2 data\n if dataset == 1:\n if arc == 'HgAr':\n obs_possibilites = np.array([11796,\n 11790,\n 11784,\n 11778,\n 11772,\n 11766,\n 11760,\n 11754,\n 11748,\n 11694,\n 11700,\n 11706,\n 11712,\n 11718,\n 11724,\n 11730,\n 11736])\n elif arc == 'Ne':\n obs_possibilites = np.array([12403,\n 12397,\n 12391,\n 12385,\n 12379,\n 12373,\n 12367,\n 12361,\n 12355,\n 12349,\n 12343,\n 12337,\n 12331,\n 12325,\n 12319,\n 12313,\n 12307])\n\n # F/2.8 data\n if dataset == 2:\n if arc == 'HgAr':\n obs_possibilites = np.array([17023,\n 17023 + 6,\n 17023 + 12,\n 17023 + 18,\n 17023 + 24,\n 17023 + 30,\n 17023 + 36,\n 17023 + 42,\n 17023 + 48,\n 17023 + 54,\n 17023 + 60,\n 17023 + 66,\n 17023 + 72,\n 17023 + 78,\n 17023 + 84,\n 17023 + 90,\n 17023 + 96,\n 17023 + 48])\n if arc == 'Ne':\n obs_possibilites = np.array([16238 + 6,\n 16238 + 12,\n 16238 + 18,\n 16238 + 24,\n 16238 + 30,\n 16238 + 36,\n 16238 + 42,\n 16238 + 48,\n 16238 + 54,\n 16238 + 60,\n 16238 + 66,\n 16238 + 72,\n 16238 + 78,\n 16238 + 84,\n 16238 + 90,\n 16238 + 96,\n 16238 + 102,\n 16238 + 54])\n if arc == 'Kr':\n obs_possibilites = np.array([17310 + 6,\n 17310 + 12,\n 17310 + 18,\n 17310 + 24,\n 17310 + 30,\n 17310 + 36,\n 17310 + 42,\n 17310 + 48,\n 17310 + 54,\n 17310 + 60,\n 17310 + 66,\n 17310 + 72,\n 17310 + 78,\n 17310 + 84,\n 17310 + 90,\n 17310 + 96,\n 17310 + 102,\n 17310 + 54])\n\n # F/2.5 data\n if dataset == 3:\n if arc == 'HgAr':\n obs_possibilites = np.array([19238,\n 19238 + 6,\n 19238 + 12,\n 19238 + 18,\n 19238 + 24,\n 19238 + 30,\n 19238 + 36,\n 19238 + 42,\n 19238 + 48,\n 19238 + 54,\n 19238 + 60,\n 19238 + 66,\n 19238 + 72,\n 19238 + 78,\n 19238 + 84,\n 19238 + 90,\n 19238 + 96,\n 19238 + 48])\n elif arc == 'Ne':\n obs_possibilites = np.array([19472 + 6,\n 19472 + 12,\n 19472 + 18,\n 19472 + 24,\n 19472 + 30,\n 19472 + 36,\n 19472 + 42,\n 19472 + 48,\n 19472 + 54,\n 19472 + 60,\n 19472 + 66,\n 19472 + 72,\n 19472 + 78,\n 19472 + 84,\n 19472 + 90,\n 19472 + 96,\n 19472 + 102,\n 19472 + 54])\n\n # F/2.8 July data\n if dataset == 4:\n if arc == 'HgAr':\n obs_possibilites = np.array([21346 + 6,\n 21346 + 12,\n 21346 + 18,\n 21346 + 24,\n 21346 + 30,\n 21346 + 36,\n 21346 + 42,\n 21346 + 48,\n 21346 + 54,\n 21346 + 60,\n 21346 + 66,\n 21346 + 72,\n 21346 + 78,\n 21346 + 84,\n 21346 + 90,\n 21346 + 96,\n 21346 + 102,\n 21346 + 48])\n if arc == 'Ne':\n obs_possibilites = np.array([21550 + 6,\n 21550 + 12,\n 21550 + 18,\n 21550 + 24,\n 21550 + 30,\n 21550 + 36,\n 21550 + 42,\n 21550 + 48,\n 21550 + 54,\n 21550 + 60,\n 21550 + 66,\n 21550 + 72,\n 21550 + 78,\n 21550 + 84,\n 21550 + 90,\n 21550 + 96,\n 21550 + 102,\n 21550 + 54])\n if arc == 'Kr':\n obs_possibilites = 
np.array([21754 + 6,\n 21754 + 12,\n 21754 + 18,\n 21754 + 24,\n 21754 + 30,\n 21754 + 36,\n 21754 + 42,\n 21754 + 48,\n 21754 + 54,\n 21754 + 60,\n 21754 + 66,\n 21754 + 72,\n 21754 + 78,\n 21754 + 84,\n 21754 + 90,\n 21754 + 96,\n 21754 + 102,\n 21754 + 54])\n\n # F/2.8 data, Subaru\n if dataset == 6:\n if arc == 'Ar':\n obs_possibilites = np.array([34341,\n 34341 + 6,\n 34341 + 12,\n 34341 + 18,\n 34341 + 24,\n 34341 + 30,\n 34341 + 36,\n 34341 + 42,\n 34341 + 48,\n 34341 + 54,\n 34341 + 60,\n 34341 + 66,\n 34341 + 72,\n 34341 + 78,\n 34341 + 84,\n 34341 + 90,\n 34341 + 96,\n 21346 + 48])\n if arc == 'Ne':\n obs_possibilites = np.array([34217,\n 34217 + 6,\n 34217 + 12,\n 34217 + 18,\n 34217 + 24,\n 34217 + 30,\n 34217 + 36,\n 34217 + 42,\n 34217 + 48,\n 34217 + 54,\n 34217 + 60,\n 34217 + 66,\n 34217 + 72,\n 34217 + 78,\n 34217 + 84,\n 34217 + 90,\n 34217 + 96,\n 34217 + 48])\n if arc == 'Kr':\n obs_possibilites = np.array([34561,\n 34561 + 6,\n 34561 + 12,\n 34561 + 18,\n 34561 + 24,\n 34561 + 30,\n 34561 + 36,\n 34561 + 42,\n 34561 + 48,\n 34561 + 54,\n 34561 + 60,\n 34561 + 66,\n 34561 + 72,\n 34561 + 78,\n 34561 + 84,\n 34561 + 90,\n 34561 + 96,\n 34561 + 48])\n\n # SM2 test data\n if dataset == 7:\n if arc == 'Ar':\n obs_possibilites = np.array([27779,\n - 999,\n 27683,\n - 999,\n - 999,\n - 999,\n - 999,\n - 999,\n 27767,\n - 999,\n - 999,\n - 999,\n - 999,\n - 999,\n 27698,\n - 999,\n 27773,\n - 999])\n if arc == 'Ne':\n obs_possibilites = np.array([27713,\n - 999,\n 27683,\n - 999,\n - 999,\n - 999,\n - 999,\n - 999,\n 27677,\n - 999,\n - 999,\n - 999,\n - 999,\n - 999,\n 27698,\n - 999,\n 27719,\n - 999])\n # Krypton data not taken\n # if arc == 'Kr':\n # obs_possibilites = np.array([34561, 34561+6, 34561+12, 34561+18, 34561+24, 34561+30,\n # 34561+36, 34561+42, 34561+48,\n # 34561+54, 34561+60, 34561+66, 34561+72,\n # 34561+78, 34561+84, 34561+90, 34561+96, 34561+48])\n\n # 21 fibers data from May/Jun 2021, taken at Subaru\n if dataset == 8:\n if arc == 'Ar':\n obs_possibilites = np.array([51485,\n 51485 + 12,\n 51485 + 2 * 12,\n 51485 + 3 * 12,\n 51485 + 4 * 12,\n 51485 + 5 * 12,\n 51485 + 6 * 12,\n 51485 + 7 * 12,\n 51485 + 8 * 12,\n 51485 + 9 * 12,\n 51485 + 10 * 12,\n 51485 + 11 * 12,\n 51485 + 12 * 12,\n 51485 + 13 * 12,\n 51485 + 14 * 12,\n 51485 + 15 * 12,\n 51485 + 16 * 12,\n 51485 + 8 * 12])\n if arc == 'Ne':\n obs_possibilites = np.array([59655,\n 59655 + 12,\n 59655 + 2 * 12,\n 59655 + 3 * 12,\n 59655 + 4 * 12,\n 59655 + 5 * 12,\n 59655 + 6 * 12,\n 59655 + 7 * 12,\n 59655 + 8 * 12,\n 59655 + 9 * 12,\n 59655 + 10 * 12,\n 59655 + 11 * 12,\n 59655 + 12 * 12,\n 59655 + 13 * 12,\n 59655 + 14 * 12,\n 59655 + 15 * 12,\n 59655 + 16 * 12,\n 59655 + 8 * 12])\n if arc == 'Kr':\n obs_possibilites = np.array([52085,\n 52085 + 12,\n 52085 + 2 * 12,\n 52085 + 3 * 12,\n 52085 + 4 * 12,\n 52085 + 5 * 12,\n 52085 + 6 * 12,\n 52085 + 7 * 12,\n 52085 + 8 * 12,\n 52085 + 9 * 12,\n 52085 + 10 * 12,\n 52085 + 11 * 12,\n 52085 + 12 * 12,\n 52085 + 13 * 12,\n 52085 + 14 * 12,\n 52085 + 15 * 12,\n 52085 + 16 * 12,\n 52085 + 8 * 12])\n\n logging.info('arc: '+str(arc))\n logging.info('obs_possibilites: '+str(obs_possibilites))\n list_of_obs_possibilites.append(obs_possibilites)\n\n logging.info('list_of_obs_possibilites: '+str(list_of_obs_possibilites))\n ##############################################\n\n # associates each observation with the label\n # describing movement of the hexapod and rough estimate of z4\n z4Input_possibilites = np.array([28, 24.5, 21, 17.5, 14, 10.5, 7, 3.5, 0, # 
noqa F841\n -3.5, -7, -10.5, -14, -17.5, -21, -24.5, -28, 0])\n label = ['m4', 'm35', 'm3', 'm25', 'm2', 'm15', 'm1', 'm05', '0',\n 'p05', 'p1', 'p15', 'p2', 'p25', 'p3', 'p35', 'p4', '0p']\n\n list_of_obs_cleaned = []\n\n for a in range(len(self.list_of_arc)):\n obs_possibilites = list_of_obs_possibilites[a]\n list_of_obs = []\n for i in range(len(list_of_labelInput)):\n label_i = list_of_labelInput[i]\n obs_cleaned = obs_possibilites[list(label).index(label_i)]\n list_of_obs.append(obs_cleaned)\n list_of_obs_cleaned.append(list_of_obs)\n\n self.list_of_obs_cleaned = list_of_obs_cleaned\n # TODO: clean out this cheating here\n list_of_obs = list_of_obs_cleaned\n self.list_of_obs = list_of_obs\n self.list_of_obs_cleaned = list_of_obs_cleaned\n logging.info('self.list_of_obs:'+str(self.list_of_obs))\n logging.info('list_of_obs_cleaned:'+str(list_of_obs_cleaned))\n return list_of_obs_cleaned\n\n def get_sci_var_mask_data(self):\n \"\"\"\n Get sci, var and mask data\n\n Parameters\n ----------\n\n Return\n ----------\n\n\n \"\"\"\n\n STAMPS_FOLDER = self.STAMPS_FOLDER\n\n list_of_sci_images_multi_spot = []\n list_of_mask_images_multi_spot = []\n list_of_var_images_multi_spot = []\n list_of_obs_cleaned_multi_spot = []\n\n self.create_list_of_obs_from_list_of_label()\n\n logging.info('list_of_obs_cleaned'+str(self.list_of_obs_cleaned))\n for s in range(len(self.list_of_spots)):\n arc = self.list_of_arc[s]\n single_number = self.list_of_spots[s]\n\n list_of_sci_images = []\n list_of_mask_images = []\n list_of_var_images = []\n list_of_obs_cleaned = []\n # list_of_times = []\n\n # loading images for the analysis\n for obs in self.list_of_obs_cleaned[s]:\n try:\n sci_image = np.load(\n STAMPS_FOLDER + 'sci' + str(obs)\n + str(single_number) + str(arc) + '_Stacked.npy')\n mask_image = np.load(\n STAMPS_FOLDER + 'mask' + str(obs)\n + str(single_number) + str(arc) + '_Stacked.npy')\n var_image = np.load(\n STAMPS_FOLDER + 'var' + str(obs)\n + str(single_number) + str(arc) + '_Stacked.npy')\n logging.info(\n 'sci_image loaded from: ' + STAMPS_FOLDER + 'sci'\n + str(obs) + str(single_number) + str(arc) + '_Stacked.npy')\n except Exception:\n # change to that code does not fail and hang if the image is not found\n # this will lead to pass statment in next step because\n # np.sum(sci_image) = 0\n logging.info('sci_image not found')\n sci_image = np.zeros((20, 20))\n var_image = np.zeros((20, 20))\n mask_image = np.zeros((20, 20))\n logging.info('not able to load image at: ' + str(STAMPS_FOLDER + 'sci'\n + str(obs) + str(single_number)\n + str(arc) + '_Stacked.npy'))\n\n # If there is no science image, do not add images\n if int(np.sum(sci_image)) == 0:\n logging.info('No science image - passing')\n pass\n else:\n # do not analyze images where a large fraction of the image is masked\n if np.mean(mask_image) > 0.1:\n logging.info(str(np.mean(mask_image) * 100)\n + '% of image is masked... 
\\\n when it is more than 10% - exiting')\n pass\n else:\n # the images ahs been found successfully\n logging.info('adding images for obs: ' + str(obs))\n list_of_sci_images.append(sci_image)\n list_of_mask_images.append(mask_image)\n list_of_var_images.append(var_image)\n\n # observation which are of good enough quality to be analyzed get added here\n list_of_obs_cleaned.append(obs)\n\n logging.info('for spot ' + str(self.list_of_spots[s]) + ' len of list_of_sci_images: '\n + str(len(list_of_sci_images)))\n logging.info('len of accepted images ' + str(len(list_of_obs_cleaned))\n + ' / len of asked images ' + str(len(self.list_of_obs_cleaned[s])))\n\n # If there is no valid images imported, exit\n if list_of_sci_images == []:\n logging.info('No valid images - exiting')\n sys.exit(0)\n\n # if you were able only to import only a fraction of images\n # if this fraction is too low - exit\n if (len(list_of_obs_cleaned) / len(self.list_of_obs_cleaned[s])) < 0.6:\n logging.info('Fraction of images imported is too low - exiting')\n sys.exit(0)\n\n list_of_sci_images_multi_spot.append(list_of_sci_images)\n list_of_mask_images_multi_spot.append(list_of_mask_images)\n list_of_var_images_multi_spot.append(list_of_var_images)\n list_of_obs_cleaned_multi_spot.append(list_of_obs_cleaned)\n\n self.list_of_obs_cleaned = list_of_obs_cleaned\n\n self.list_of_sci_images = list_of_sci_images\n self.list_of_var_images = list_of_var_images\n self.list_of_mask_images = list_of_mask_images\n\n array_of_sci_images_multi_spot = np.array(list_of_sci_images_multi_spot)\n array_of_mask_images_multi_spot = np.array(list_of_mask_images_multi_spot)\n array_of_var_images_multi_spot = np.array(list_of_var_images_multi_spot)\n array_of_obs_cleaned_multi_spot = np.array(list_of_obs_cleaned_multi_spot)\n\n self.array_of_obs_cleaned_multi_spot = array_of_obs_cleaned_multi_spot\n\n return array_of_sci_images_multi_spot, array_of_var_images_multi_spot,\\\n array_of_mask_images_multi_spot, array_of_obs_cleaned_multi_spot\n\n def create_output_names(self, date_of_output):\n \"\"\"\n Get sci, var and mask data\n\n Parameters\n ----------\n\n Return\n ----------\n\n\n \"\"\"\n\n eps = self.eps\n\n list_of_NAME_OF_CHAIN = []\n list_of_NAME_OF_LIKELIHOOD_CHAIN = []\n\n for s in range(len(self.list_of_spots)):\n arc = self.list_of_arc[s]\n # to be consistent with previous versions of the code, use the last obs avalible in the name\n obs_for_naming = self.array_of_obs_cleaned_multi_spot[s][-1]\n # give it invidual name here, just to make srue that by accident we do not\n # overload the variable and cause errors downstream\n single_number_str = self.list_of_spots[s]\n NAME_OF_CHAIN = 'chain' + str(date_of_output) + '_Single_P_' + \\\n str(obs_for_naming) + str(single_number_str) + str(eps) + str(arc)\n NAME_OF_LIKELIHOOD_CHAIN = 'likechain' + str(date_of_output) + '_Single_P_' +\\\n str(obs_for_naming) + str(single_number_str) + str(eps) + str(arc)\n\n list_of_NAME_OF_CHAIN.append(NAME_OF_CHAIN)\n list_of_NAME_OF_LIKELIHOOD_CHAIN.append(NAME_OF_LIKELIHOOD_CHAIN)\n\n return list_of_NAME_OF_CHAIN, list_of_NAME_OF_LIKELIHOOD_CHAIN\n\n def get_finalArc(self, arc, date_of_input='Sep0521', direct_or_interpolation='direct'):\n # TODO make it so you ont need to specify date of input if you only want finalArc\n DATAFRAMES_FOLDER = self.DATAFRAMES_FOLDER\n\n dataset = self.dataset\n ################################################\n # (3.) 
import dataframes\n ################################################\n\n # where are the dataframes located\n # these files give auxiliary information which enables us to connect spot number with other properties\n # such as the position on the detector, wavelength, etc...\n # Ar (Argon)\n if str(arc) == 'Ar' or arc == 'HgAr':\n with open(DATAFRAMES_FOLDER + 'results_of_fit_many_' + str(direct_or_interpolation)\n + '_Ar_from_' + str(date_of_input) + '.pkl', 'rb') as f:\n results_of_fit_input_HgAr = pickle.load(f)\n logging.info('results_of_fit_input_Ar is taken from: ' + str(f))\n # if before considering all fibers\n if dataset < 8:\n with open(DATAFRAMES_FOLDER + 'finalAr_Feb2020', 'rb') as f:\n finalAr_Feb2020_dataset = pickle.load(f)\n else:\n with open(DATAFRAMES_FOLDER + 'finalAr_Jul2021.pkl', 'rb') as f:\n finalAr_Feb2020_dataset = pickle.load(f)\n\n # Ne (Neon)\n if str(arc) == 'Ne':\n with open(DATAFRAMES_FOLDER + 'results_of_fit_many_' + str(direct_or_interpolation)\n + '_Ne_from_' + str(date_of_input) + '.pkl', 'rb') as f:\n results_of_fit_input_Ne = pickle.load(f)\n logging.info('results_of_fit_input_Ne is taken from: ' + str(f))\n if dataset < 8:\n with open(DATAFRAMES_FOLDER + 'finalNe_Feb2020', 'rb') as f:\n finalNe_Feb2020_dataset = pickle.load(f)\n else:\n with open(DATAFRAMES_FOLDER + 'finalNe_Jul2021.pkl', 'rb') as f:\n finalNe_Feb2020_dataset = pickle.load(f)\n\n # Kr (Krypton)\n if str(arc) == 'Kr':\n with open(DATAFRAMES_FOLDER + 'results_of_fit_many_' + str(direct_or_interpolation)\n + '_Kr_from_' + str(date_of_input) + '.pkl', 'rb') as f:\n results_of_fit_input_Kr = pickle.load(f)\n logging.info('results_of_fit_input_Kr is taken from: ' + str(f))\n if dataset < 8:\n with open(DATAFRAMES_FOLDER + 'finalKr_Feb2020', 'rb') as f:\n finalKr_Feb2020_dataset = pickle.load(f)\n else:\n with open(DATAFRAMES_FOLDER + 'finalKr_Jul2021.pkl', 'rb') as f:\n finalKr_Feb2020_dataset = pickle.load(f)\n\n # depening on the arc, select the appropriate dataframe\n # change here to account for 21 fiber data\n if arc == \"HgAr\":\n results_of_fit_input = results_of_fit_input_HgAr\n # finalArc = finalHgAr_Feb2020_dataset\n elif arc == \"Ne\":\n results_of_fit_input = results_of_fit_input_Ne\n finalArc = finalNe_Feb2020_dataset\n elif arc == \"Kr\":\n results_of_fit_input = results_of_fit_input_Kr\n finalArc = finalKr_Feb2020_dataset\n elif arc == \"Ar\":\n results_of_fit_input = results_of_fit_input_HgAr\n finalArc = finalAr_Feb2020_dataset\n\n self.results_of_fit_input = results_of_fit_input\n self.finalArc = finalArc\n\n return finalArc\n\n def create_array_of_wavelengths(self):\n\n list_of_spots = self.list_of_spots\n\n list_of_wavelengths = []\n for s in range(len(list_of_spots)):\n arc = self.list_of_arc[s]\n finalArc = self.get_finalArc(arc)\n single_number = list_of_spots[s]\n wavelength = float(finalArc.iloc[int(single_number)]['wavelength'])\n logging.info(\"wavelength used for spot \"+str(s)+\" [nm] is: \" + str(wavelength))\n list_of_wavelengths.append(wavelength)\n array_of_wavelengths = np.array(list_of_wavelengths)\n\n return array_of_wavelengths\n\n def create_parametrization_proposals(self, date_of_input,\n direct_or_interpolation='direct',\n twentytwo_or_extra=56):\n \"\"\"\n\n\n Parameters\n ----------\n date_of_input: `str`\n Date desription of the input dataframe\n direct_or_interpolation: `str`\n Mode description of the input dataframe\n twentytwo_or_extra: `int`\n Highest Zernike to go to\n\n Return\n ----------\n\n Notes\n ----------\n \"\"\"\n\n list_of_spots = 
self. list_of_spots\n # dataset = self.dataset\n # DATAFRAMES_FOLDER = self.DATAFRAMES_FOLDER\n\n list_of_array_of_polyfit_1_parameterizations_proposal_shape_2d = []\n\n for s in range(len(list_of_spots)):\n arc = self.list_of_arc[s]\n finalArc = self.get_finalArc(arc, date_of_input=date_of_input,\n direct_or_interpolation=direct_or_interpolation)\n results_of_fit_input = self.results_of_fit_input\n\n single_number = list_of_spots[s]\n\n # you are passing multiple images, so allparameters and defocuses need to be passed into a list\n list_of_allparameters = []\n list_of_defocuses = []\n # search for the previous avaliable results\n # add the ones that you found in array_of_allparameters and for which\n # labels are avaliable in list_of_defocuses\n for label in ['m4', 'm35', 'm3', 'm05', '0', 'p05', 'p3', 'p35', 'p4']:\n\n # check if your single_number is avaliable\n\n logging.info('adding label ' + str(label) + ' with single_number '\n + str(int(single_number)) + ' for creation of array_of_allparameters')\n try:\n if int(single_number) < 999:\n logging.info(results_of_fit_input[label].index.astype(int))\n # if your single_number is avaliable go ahead\n if int(single_number) in results_of_fit_input[label].index.astype(\n int):\n logging.info('Solution for this spot is avaliable')\n if isinstance(results_of_fit_input[label].index[0], str) or str(\n type(results_of_fit_input[label].index[0])) == \"<class 'numpy.str_'>\":\n list_of_allparameters.append(\n results_of_fit_input[label].loc[str(single_number)].values)\n logging.info('results_of_fit_input[' + str(label) + '].loc['\n + str(int(single_number)) + '].values' + str(\n results_of_fit_input[label].loc[str(single_number)].values))\n else:\n # logging.info('results_of_fit_input[label]'+str(results_of_fit_input[label]))\n list_of_allparameters.append(\n results_of_fit_input[label].loc[int(single_number)].values)\n logging.info('results_of_fit_input[' + str(label) + '].loc['\n + str(int(single_number)) + '].values' + str(\n results_of_fit_input[label].loc[int(single_number)].values))\n list_of_defocuses.append(label)\n\n else:\n # if the previous solution is not avaliable,\n # find the closest avaliable, right?\n logging.info(\n 'Solution for this spot is not avaliable, reconstructing from nearby spot')\n\n # positions of all avaliable spots\n x_positions = finalArc.loc[results_of_fit_input[label].index.astype(\n int)]['xc_effective']\n y_positions = finalArc.loc[results_of_fit_input[label].index.astype(\n int)]['yc']\n logging.info('checkpoint 1')\n logging.info(label)\n # logging.info(results_of_fit_input[labelInput].index)\n # position of the input spot\n position_x_single_number = finalArc['xc_effective'].loc[int(\n single_number)]\n position_y_single_number = finalArc['yc'].loc[int(\n single_number)]\n logging.info('checkpoint 2')\n logging.info(position_x_single_number)\n distance_of_avaliable_spots = np.abs(\n (x_positions - position_x_single_number)**2\n + (y_positions - position_y_single_number)**2)\n single_number_input =\\\n distance_of_avaliable_spots[distance_of_avaliable_spots == # noqa W504\n np.min(distance_of_avaliable_spots)].index[0]\n logging.info(\n 'Nearest spot avaliable is: ' + str(single_number_input))\n if isinstance(results_of_fit_input[label].index[0], str) or str(\n type(results_of_fit_input[label].index[0])) == \"<class 'numpy.str_'>\":\n list_of_allparameters.append(\n results_of_fit_input[label].loc[str(single_number_input)].values)\n else:\n list_of_allparameters.append(\n 
results_of_fit_input[label].loc[int(single_number_input)].values)\n list_of_defocuses.append(label)\n logging.info('results_of_fit_input[' + str(label) + '].loc['\n + str(int(single_number_input)) + '].values' + str(\n results_of_fit_input[label].loc[int(single_number_input)].values))\n\n pass\n\n except BaseException:\n logging.info('not able to add label ' + str(label))\n pass\n\n array_of_allparameters = np.array(list_of_allparameters)\n\n # based on the information from the previous step (results at list_of_defocuses),\n # generate singular array_of_allparameters at list_of_labelInput positions\n # has shape 2xN, N = number of parameters\n logging.info('Variable twentytwo_or_extra: ' + str(twentytwo_or_extra))\n analysis_type = 'defocus'\n if analysis_type == 'defocus':\n logging.info('Variable array_of_allparameters.shape: '\n + str(array_of_allparameters.shape))\n\n # model_multi is only needed to create reasonable parametrizations and\n # could possibly be avoided in future versions?\n model_multi = LN_PFS_multi_same_spot(\n list_of_sci_images=self.list_of_sci_images,\n list_of_var_images=self.list_of_var_images,\n list_of_mask_images=self.list_of_mask_images,\n wavelength=800,\n dithering=1,\n save=0,\n verbosity=0,\n npix=1536,\n list_of_defocuses=self.list_of_labelInput,\n zmax=twentytwo_or_extra,\n double_sources=False,\n double_sources_positions_ratios=[0, 0],\n test_run=False)\n\n array_of_polyfit_1_parameterizations_proposal =\\\n model_multi.create_resonable_allparameters_parametrizations(\n array_of_allparameters=array_of_allparameters,\n list_of_defocuses_input=list_of_defocuses,\n zmax=twentytwo_or_extra,\n remove_last_n=2)\n\n # lets be explicit that the shape of the array is 2d\n array_of_polyfit_1_parameterizations_proposal_shape_2d =\\\n array_of_polyfit_1_parameterizations_proposal\n\n list_of_array_of_polyfit_1_parameterizations_proposal_shape_2d.append(\n array_of_polyfit_1_parameterizations_proposal_shape_2d)\n\n # array contaning the arrays with parametrizations for all spots\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d = np.array(\n list_of_array_of_polyfit_1_parameterizations_proposal_shape_2d)\n\n self.array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d = \\\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d\n\n return array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d\n\n def create_init_parameters_for_particles(self, zmax_input=56, analysis_type='defocus',\n analysis_type_fiber=None):\n \"\"\"\n Create initial parameters for all particles\n\n Parameters\n ----------\n zmax_input: `int`\n Highest Zernike order in input?\n\n analysis_type: `str`\n fiber_par\n Zernike_par?\n ?\n ?\n\n\n Return\n ----------\n\n\n \"\"\"\n logging.info('analysis_type '+str(analysis_type))\n options = self.options\n\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d = \\\n self.array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d\n list_of_spots = self.list_of_spots\n multi_var = self.multi_var\n logging.info('analysis_type_fiber: ' + str(analysis_type_fiber))\n logging.info(type(analysis_type_fiber))\n if analysis_type_fiber == \"fiber_par\" or analysis_type_fiber == \"fixed_fiber_par\":\n # x_fiber, y_fiber, effective_radius_illumination, frd_sigma, frd_lorentz_factor\n unified_index = [10, 11, 12, 13, 14]\n\n ################################################\n # (8.) 
Create init parameters for particles\n ################################################\n\n # TODO: ellimate either columns or columns22 variable\n # columns = columns22\n\n #############\n # First swarm\n\n # if working with defocus, but many images\n # z4, z5, z6, z7, z8, z9, , z11\n # z12, z13, z14, z15, z16, z17, z18, z19, z20, z21, z22\n # detFrac, strutFrac, dxFocal, dyFocal, slitFrac, slitFrac_dy\n # wide_0, wide_23, wide_43, misalign\n # x_fiber, y_fiber, effective_radius_illumination, frd_sigma, frd_lorentz_factor,\n # det_vert, slitHolder_frac_dx, grating_lines,\n # scattering_slope, scattering_amplitude\n # pixel_effect, fiber_r\n logging.info('analysis_type_fiber is: '+str(analysis_type_fiber))\n if analysis_type_fiber == \"fiber_par\":\n # dont vary global illumination paramters or Zernike\n stronger_array_01 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0,\n 1.2, 1.2, 1.2, 1.2, 1.2,\n 0, 0, 1,\n 1, 1,\n 1, 0.6, 1])\n elif analysis_type_fiber == \"fixed_fiber_par\":\n logging.info('inside analysis_type_fiber')\n stronger_array_01 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1,\n 0, 0, 0, 0, 0,\n 1, 1, 1,\n 1, 1,\n 1, 0.6, 1])\n else:\n stronger_array_01 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1.2, 1.2, 1.2, 1.2, 1.2, 1.2,\n 1.2, 1.2, 1.2, 1.2,\n 1.2, 1.2, 1.2, 1.2, 1.2, 1.2,\n 1.2, 1.2,\n 1.2, 1.2, 1.2, 1.2, 1])\n\n # fix all of the properties that describe the fiber illumination\n # v051.b change\n # if self.analysis_type == 'fixed_single_spot':\n # stronger_array_01[19*2:][unified_index] = 0\n\n # we are passing the parametrizations, which need to be translated to parameters for each image\n # from Zernike_Module we imported the function `check_global_parameters'\n list_of_global_parameters = []\n for s in range(len(self.list_of_spots)):\n array_of_polyfit_1_parameterizations_proposal_shape_2d =\\\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d[s]\n\n global_parameters = array_of_polyfit_1_parameterizations_proposal_shape_2d[:, 1][19:19 + 23]\n\n list_of_global_parameters.append(global_parameters)\n\n # create global parameters which are the same for all spots\n # modify global parameters which should be the same for all of the spots in the fiber\n # these parama\n array_of_global_parameters = np.array(list_of_global_parameters)\n global_parameters = np.mean(array_of_global_parameters, axis=0)\n checked_global_parameters = check_global_parameters(global_parameters)\n\n # return the unified global parameters to each spot, for the parameters which should be unified\n # index of global parameters that we are setting to be same\n # 'x_fiber', 'y_fiber', 'effective_radius_illumination', 'frd_sigma',\n # 'frd_lorentz_factor'\n\n for s in range(len(list_of_spots)):\n for i in unified_index:\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d[s][:, 1][19:19 + 23][i] =\\\n checked_global_parameters[i]\n\n # list contaning parInit1 for each spot\n list_of_parInit1 = []\n for s in range(len(list_of_spots)):\n\n array_of_polyfit_1_parameterizations_proposal_shape_2d =\\\n array_of_array_of_polyfit_1_parameterizations_proposal_shape_2d[s]\n logging.info(\n 'array_of_polyfit_1_parameterizations_proposal_shape_2d: '\n + 
str(array_of_polyfit_1_parameterizations_proposal_shape_2d))\n parInit1 = create_parInit(\n allparameters_proposal=array_of_polyfit_1_parameterizations_proposal_shape_2d,\n multi=multi_var,\n pupil_parameters=None,\n allparameters_proposal_err=None,\n stronger=stronger_array_01,\n use_optPSF=None,\n deduced_scattering_slope=None,\n zmax=zmax_input)\n\n # the number of walkers is given by options array, specified with\n # the parameter eps at the start\n while len(parInit1) < options[0]:\n parInit1_2 = create_parInit(\n allparameters_proposal=array_of_polyfit_1_parameterizations_proposal_shape_2d,\n multi=multi_var,\n pupil_parameters=None,\n allparameters_proposal_err=None,\n stronger=stronger_array_01,\n use_optPSF=None,\n deduced_scattering_slope=None,\n zmax=zmax_input)\n parInit1 = np.vstack((parInit1, parInit1_2))\n\n list_of_parInit1.append(parInit1)\n\n # Standard deviation of parameters (for control only?).\n # One array is enough, because we can use it for both spots (right?).\n parInit1_std = []\n for i in range(parInit1.shape[1]):\n parInit1_std.append(np.std(parInit1[:, i]))\n parInit1_std = np.array(parInit1_std)\n logging.info('parInit1_std: ' + str(parInit1_std))\n\n # Number of particles and number of parameters\n particleCount = options[0]\n paramCount = len(parInit1[0])\n\n # initialize the particles\n particle_likelihood = np.array([-9999999])\n # particle_position = np.zeros(paramCount)\n # particle_velocity = np.zeros(paramCount)\n best_particle_likelihood = particle_likelihood[0]\n\n #\n list_of_array_of_particle_position_proposal = []\n list_of_array_of_particle_velocity_proposal = []\n list_of_best_particle_likelihood = []\n for s in range(len(list_of_spots)):\n parInit1 = list_of_parInit1[s]\n array_of_particle_position_proposal = parInit1[0:particleCount]\n array_of_particle_velocity_proposal = np.zeros(\n (particleCount, paramCount))\n\n list_of_array_of_particle_position_proposal.append(\n array_of_particle_position_proposal)\n list_of_array_of_particle_velocity_proposal.append(\n array_of_particle_velocity_proposal)\n list_of_best_particle_likelihood.append(best_particle_likelihood)\n\n return list_of_array_of_particle_position_proposal,\\\n list_of_array_of_particle_velocity_proposal, list_of_best_particle_likelihood, paramCount\n\n\n# ***********************\n# 'free' (not inside a class) definitions below\n# ***********************\n\ndef create_popt_for_custom_var(sci_image, var_image, mask_image=None):\n \"\"\"Create 2nd order poly fit; to be used in creation of custom var image\n\n TODO: same function in LN_PFS_Single... 
Very unsatifactory!\n\n The connection between variance and flux is determined from the provided science image\n and variance image.\n All of inputs have to be 2d np.arrays with same size.\n Introduced in 0.50 (PIPE2D-931)\n\n Called by Tokovinin_algorithm_chi_multi\n\n Parameters\n ----------\n sci_image : `np.array`\n Scientific array\n var_image : `np.array`\n Variance array\n mask_image : `np.array`\n Mask image\n\n Returns\n ----------\n custom_var_image : `np.array`\n Recreated variance map\n\n \"\"\"\n if mask_image is None:\n sci_pixels = sci_image.ravel()\n var_pixels = var_image.ravel()\n else:\n sci_pixels = sci_image[mask_image == 0].ravel()\n var_pixels = var_image[mask_image == 0].ravel()\n # z = np.polyfit(sci_pixels, var_pixels, deg=2)\n # if z[0] < 0:\n # z = np.polyfit(sci_pixels, var_pixels, deg=1)\n # p1 = np.poly1d(z)\n # custom_var_image = p1(sci_image)\n\n # I am using lambda expression to avoid superflous definition of quadratic function\n f = lambda x, *p: p[0] * x**2 + p[1] * x + p[2] # noqa : E373\n popt, pcov = scipy.optimize.curve_fit(f, sci_pixels, var_pixels, [0, 0, np.min(var_pixels)],\n bounds=([-np.inf, -np.inf, np.min(var_pixels)],\n [np.inf, np.inf, np.inf]))\n return popt\n\n\ndef create_custom_var_from_popt(model_image, popt):\n \"\"\"Creates variance map from the model image, given the 2nd poly fit parameters\n\n Introduced in 0.50 (PIPE2D-931)\n\n Parameters\n ----------\n modelImg : `np.array`\n Model image\n popt : `np.array`\n 2d polyfit parameters\n Returns\n ----------\n custom_var_image : `np.array`\n Recreated variance map\n \"\"\"\n # I am using lambda expression to avoid superflous definition of quadratic function\n f = lambda x, *p: p[0] * x**2 + p[1] * x + p[2] # noqa : E373\n custom_var_image = f(model_image, *popt)\n return custom_var_image\n\n\ndef svd_invert(matrix, threshold):\n '''\n :param matrix:\n :param threshold:\n :return:SCD-inverted matrix\n '''\n # logging.info 'MATRIX:',matrix\n u, ws, v = svd(matrix, full_matrices=True)\n\n # invw = inv(np.identity(len(ws))*ws)\n # return ws\n\n ww = np.max(ws)\n n = len(ws)\n invw = np.identity(n)\n ncount = 0\n\n for i in range(n):\n if ws[i] < ww * threshold:\n # log.info('SVD_INVERT: Value %i=%.2e rejected (threshold=%.2e).'%(i,ws[i],ww*threshold))\n invw[i][i] = 0.\n ncount += 1\n else:\n # logging.info 'WS[%4i] %15.9f'%(i,ws[i])\n invw[i][i] = 1. 
/ ws[i]\n\n # log.info('%i singular values rejected in inversion'%ncount)\n # fixed error on September 18, 2020 - before it was missing one transpose, see below\n # inv_matrix = np.dot(u , np.dot( invw, v))\n inv_matrix = np.dot(u, np.dot(np.transpose(invw), v))\n\n return inv_matrix\n\n\ndef find_centroid_of_flux(image, mask=None):\n \"\"\"\n function giving the tuple of the position of weighted average of the flux in a square image\n indentical result as calculateCentroid from drp_stella.images\n\n @input image poststamp image for which to find center\n @input mask mask, same size as the image\n\n returns tuple with x and y center, in units of pixels\n \"\"\"\n if mask is None:\n mask = np.ones(image.shape)\n\n x_center = []\n y_center = []\n\n # if there are nan values (most likely cosmics), replace them with max value in the rest of the image\n # careful, this can seriously skew the results if not used for this purpose\n max_value_image = np.max(image[~np.isnan(image)])\n image[np.isnan(image)] = max_value_image\n\n I_x = []\n for i in range(len(image)):\n I_x.append([i, np.mean(image[:, i] * mask[:, i])])\n\n I_x = np.array(I_x)\n\n I_y = []\n for i in range(len(image)):\n I_y.append([i, np.mean(image[i] * mask[i])])\n\n I_y = np.array(I_y)\n\n x_center = (np.sum(I_x[:, 0] * I_x[:, 1]) / np.sum(I_x[:, 1]))\n y_center = (np.sum(I_y[:, 0] * I_y[:, 1]) / np.sum(I_y[:, 1]))\n\n return(x_center, y_center)\n\n\ndef create_parInit(allparameters_proposal, multi=None, pupil_parameters=None, allparameters_proposal_err=None,\n stronger=None, use_optPSF=None, deduced_scattering_slope=None, zmax=None):\n \"\"\"!given the suggested parametrs create array with randomized starting values to supply to fitting code\n\n @param allparameters_proposal array contaning suggested starting values for a model\n @param multi set to True when you want to analyze more images at once\n @param pupil_parameters fix parameters describing the pupil\n @param allparameters_proposal_err uncertantity on proposed parameters\n @param stronger factors which increases all uncertanties by a constant value\n @param use_optPFS fix all parameters that give pure optical PSF, except z4\n (allowing change in ['z4', 'scattering_slope', 'scattering_amplitude', 'pixel_effect', 'fiber_r', 'flux'])\n @param deduced_scattering_slope\n @param zmax\n\n \"\"\"\n\n np.random.seed(101)\n\n if multi:\n #\n if len(allparameters_proposal.shape) == 2:\n # if you have passed 2d parametrization you have to move it to one 1d\n array_of_polyfit_1_parameterizations = np.copy(allparameters_proposal)\n if zmax == 11:\n # not implemented\n pass\n if zmax == 22:\n # logging.info('zmax is 22, right: ' +str(zmax))\n # logging.info('len(array_of_polyfit_1_parameterizations[19:]) ' +\n # str(len(array_of_polyfit_1_parameterizations[19:]) ))\n\n # if you have passed the parametrization that goes to the zmax=22,\n # depending if you passed value for flux\n if len(array_of_polyfit_1_parameterizations[19:]) == 23:\n allparameters_proposal = np.concatenate(\n (array_of_polyfit_1_parameterizations[:19].ravel(),\n array_of_polyfit_1_parameterizations[19:-1][:, 1]))\n if len(array_of_polyfit_1_parameterizations[19:]) == 22:\n allparameters_proposal = np.concatenate(\n (array_of_polyfit_1_parameterizations[:19].ravel(),\n array_of_polyfit_1_parameterizations[19:][:, 1]))\n\n # if you have passed too many\n if len(array_of_polyfit_1_parameterizations[19:]) > 23:\n allparameters_proposal = np.concatenate(\n (array_of_polyfit_1_parameterizations[:19].ravel(),\n 
array_of_polyfit_1_parameterizations[19:][:, 1]))\n\n if zmax > 22:\n # will fail if you ask for z larger than 22 and you have not provided it\n allparameters_proposal = np.concatenate((array_of_polyfit_1_parameterizations[:19].ravel(\n ), array_of_polyfit_1_parameterizations[19:19 + 23][:, 1],\n array_of_polyfit_1_parameterizations[42:].ravel()))\n\n # if you have passed 1d parametrizations just copy\n else:\n\n allparameters_proposal = np.copy(allparameters_proposal)\n\n # if you are passing explicit estimate for uncertantity of parameters,\n # make sure length is the same as of the parameters\n if allparameters_proposal_err is not None:\n assert len(allparameters_proposal) == len(allparameters_proposal_err)\n\n # default value for multiplying all uncertantity values (making them stronger) is 1\n if stronger is None:\n stronger = 1\n\n # default value for zmax, if None is passed it is set at 22\n if zmax is None:\n zmax = 22\n\n # if you are passing fixed scattering slope at number deduced from larger defocused image\n # does not work with multi!!!! (really? - I think it should)\n if zmax == 11:\n if deduced_scattering_slope is not None:\n allparameters_proposal[26] = np.abs(deduced_scattering_slope)\n if zmax == 22:\n if deduced_scattering_slope is not None:\n allparameters_proposal[26 + 11] = np.abs(deduced_scattering_slope)\n\n if zmax == 11:\n if allparameters_proposal_err is None:\n if multi is None:\n # 8 values describing z4-z11\n allparameters_proposal_err = stronger * np.array([2, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,\n 0.1, 0.02, 0.1, 0.1, 0.1, 0.1,\n 0.3, 1, 0.1, 0.1,\n 0.15, 0.15, 0.1,\n 0.07, 0.2, 0.05, 0.4,\n 30000, 0.5, 0.01,\n 0.1, 0.05, 0.01])\n # fixed scattering slope at number deduced from larger defocused image\n if deduced_scattering_slope is not None:\n allparameters_proposal_err[26] = 0\n else:\n # 16 values describing z4-z11\n allparameters_proposal_err = stronger * np.array([2,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.1,\n 0.1,\n 0.1,\n 0.1,\n 0.05,\n 0.1,\n 0.2,\n 0.4,\n 0.1,\n 0.1,\n 0.1,\n 0.1,\n 0.02,\n 0.02,\n 0.5,\n 0.2,\n 0.1,\n 30000,\n 0.5,\n 0.01,\n 0.1,\n 0.05,\n 0.01])\n if zmax >= 22:\n\n extra_Zernike_parameters_number = zmax - 22\n # logging.info('extra_Zernike_parameters_number in parInit:' +str(extra_Zernike_parameters_number))\n if allparameters_proposal_err is None:\n if multi is None or multi is False:\n # 19 values describing z4-z22\n # smaller values for z12-z22\n # ['z4','z5','z6','z7','z8','z9','z10','z11',\n # 'z12','z13','z14','z15','z16','z17','z18','z19','z20','z21','z22',\n # 'detFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',\n # 'wide_0','wide_23','wide_43','misalign',\n # 'x_fiber','y_fiber','effective_ilum_radius',\n # 'frd_sigma','frd_lorentz_factor','det_vert','slitHolder_frac_dx',\n # 'grating_lines','scattering_slope','scattering_amplitude',\n # 'pixel_effect','fiber_r','flux']\n\n allparameters_proposal_err = stronger * np.array([2,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.08,\n 0.03,\n 0.1,\n 0.1,\n 0.016,\n 0.05,\n 0.3,\n 0.3,\n 0.3,\n 10,\n 0.15,\n 0.15,\n 0.1,\n 0.1,\n 0.64,\n 0.05,\n 0.2,\n 60000,\n 0.95,\n 0.014,\n 0.2,\n 0.14,\n 0.015])\n if extra_Zernike_parameters_number > 0:\n extra_Zernike_proposal = 0.0 * np.ones((extra_Zernike_parameters_number,))\n allparameters_proposal_err = np.concatenate(\n 
(allparameters_proposal_err, extra_Zernike_proposal))\n\n # fixed scattering slope at number deduced from larger defocused image\n if deduced_scattering_slope is not None:\n allparameters_proposal_err[26 + 11] = 0\n else:\n # determined from results_of_fit_input_HgAr\n\n allparameters_proposal_err = stronger * np.array([0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.25,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.15,\n 0.035,\n 0.02,\n 0.1,\n 0.1,\n 0.008,\n 0.05,\n 0.3,\n 0.3,\n 0.3,\n 10,\n 0.1,\n 0.1,\n 0.1,\n 0.08,\n 0.2,\n 0.05,\n 0.1,\n 60000,\n 0.4,\n 0.006,\n 0.2,\n 0.04,\n 0.015])\n\n # at the moment zero because I do not want these to actually move around,\n # but perhaps needs to be reconsidered in the future\n extra_Zernike_proposal = 0.0 * np.ones((extra_Zernike_parameters_number * 2,))\n allparameters_proposal_err = np.concatenate(\n (allparameters_proposal_err, extra_Zernike_proposal))\n\n if pupil_parameters is None:\n number_of_par = len(allparameters_proposal_err)\n else:\n number_of_par = len(allparameters_proposal_err) - len(pupil_parameters)\n\n walkers_mult = 6\n nwalkers = number_of_par * walkers_mult\n\n if zmax == 11:\n if multi is None or multi is False:\n zparameters_flatten = allparameters_proposal[0:8]\n zparameters_flatten_err = allparameters_proposal_err[0:8]\n globalparameters_flatten = allparameters_proposal[8:]\n globalparameters_flatten_err = allparameters_proposal_err[8:]\n\n else:\n zparameters_flatten = allparameters_proposal[0:8 * 2]\n zparameters_flatten_err = allparameters_proposal_err[0:8 * 2]\n globalparameters_flatten = allparameters_proposal[8 * 2:]\n globalparameters_flatten_err = allparameters_proposal_err[8 * 2:]\n # if we have 22 or more\n if zmax >= 22:\n if multi is None or multi is False:\n zparameters_flatten = allparameters_proposal[0:8 + 11]\n zparameters_flatten_err = allparameters_proposal_err[0:8 + 11]\n globalparameters_flatten = allparameters_proposal[8 + 11:8 + 11 + 23]\n globalparameters_flatten_err = allparameters_proposal_err[8 + 11:8 + 11 + 23]\n zparameters_extra_flatten = allparameters_proposal[(8 + 11) * 1 + 23:]\n zparameters_extra_flatten_err = allparameters_proposal_err[(8 + 11) * 1 + 23:]\n else:\n zparameters_flatten = allparameters_proposal[0:(8 + 11) * 2]\n zparameters_flatten_err = allparameters_proposal_err[0:(8 + 11) * 2]\n globalparameters_flatten = allparameters_proposal[(8 + 11) * 2:(8 + 11) * 2 + 23]\n globalparameters_flatten_err = allparameters_proposal_err[(8 + 11) * 2:(8 + 11) * 2 + 23]\n zparameters_extra_flatten = allparameters_proposal[(8 + 11) * 2 + 23:]\n zparameters_extra_flatten_err = allparameters_proposal_err[(8 + 11) * 2 + 23:]\n # logging.info('zparameters_flatten '+str(zparameters_flatten))\n\n if zmax == 11:\n if multi is None:\n try:\n for i in range(8):\n if i == 0:\n zparameters_flat_single_par = np.concatenate(([zparameters_flatten[i]],\n np.random.normal(\n zparameters_flatten[i], zparameters_flatten_err[i], nwalkers - 1)))\n else:\n zparameters_flat_single_par = np.concatenate(([zparameters_flatten[i]],\n np.random.normal(\n zparameters_flatten[i], zparameters_flatten_err[i], nwalkers - 1)))\n if i == 0:\n zparameters_flat = zparameters_flat_single_par\n else:\n zparameters_flat = np.column_stack((zparameters_flat, zparameters_flat_single_par))\n except NameError:\n 
logging.info('NameError!')\n else:\n try:\n for i in range(8 * 2):\n zparameters_flat_single_par = np.concatenate(([zparameters_flatten[i]], np.random.normal(\n zparameters_flatten[i], zparameters_flatten_err[i], nwalkers - 1)))\n if i == 0:\n zparameters_flat = zparameters_flat_single_par\n else:\n zparameters_flat = np.column_stack((zparameters_flat, zparameters_flat_single_par))\n except NameError:\n logging.info('NameError!')\n\n # if we have 22 or more\n if zmax >= 22:\n if multi is None or multi is False:\n try:\n\n for i in range(8 + 11):\n if i == 0:\n zparameters_flat_single_par = np.concatenate(([zparameters_flatten[i]],\n np.random.normal(\n zparameters_flatten[i], zparameters_flatten_err[i], nwalkers - 1)))\n else:\n zparameters_flat_single_par = np.concatenate(([zparameters_flatten[i]],\n np.random.normal(\n zparameters_flatten[i], zparameters_flatten_err[i], nwalkers - 1)))\n if i == 0:\n zparameters_flat = zparameters_flat_single_par\n else:\n zparameters_flat = np.column_stack((zparameters_flat, zparameters_flat_single_par))\n\n # if you are going for extra Zernike parameters\n # copied the same code from multi\n for i in range(extra_Zernike_parameters_number):\n\n zparameters_extra_flat_single_par = np.concatenate(([zparameters_extra_flatten[i]],\n np.random.normal(\n zparameters_extra_flatten[i], zparameters_extra_flatten_err[i], nwalkers - 1)))\n\n # zparameters_extra_flat_single_par=np.random.normal(0,0.05,nwalkers)\n # logging.info(zparameters_extra_flat_single_par.shape)\n if i == 0:\n zparameters_extra_flat = zparameters_extra_flat_single_par\n else:\n zparameters_extra_flat = np.column_stack(\n (zparameters_extra_flat, zparameters_extra_flat_single_par))\n\n except NameError:\n logging.info('NameError!')\n\n # in case that multi variable is turned on:\n else:\n try:\n for i in range((8 + 11) * 2):\n # logging.info('i'+str(i))\n # logging.info('zparameters_flatten[i]: '+str(zparameters_flatten[i]))\n # logging.info('zparameters_flatten_err[i]: '+str(zparameters_flatten_err[i]))\n # logging.info('nwalkers-1: '+str(nwalkers-1))\n # logging.info(np.random.normal(zparameters_flatten[i],zparameters_flatten_err[i],nwalkers-1))\n zparameters_flat_single_par = np.concatenate(([zparameters_flatten[i]], np.random.normal(\n zparameters_flatten[i], zparameters_flatten_err[i], nwalkers - 1)))\n\n if i == 0:\n zparameters_flat = zparameters_flat_single_par\n else:\n zparameters_flat = np.column_stack((zparameters_flat, zparameters_flat_single_par))\n\n # if you are going for extra Zernike parameters\n if zmax > 22:\n for i in range(extra_Zernike_parameters_number * 2):\n zparameters_extra_flat_single_par = np.concatenate(([zparameters_extra_flatten[i]],\n np.random.normal(\n zparameters_extra_flatten[i], zparameters_extra_flatten_err[i], nwalkers - 1)))\n\n # zparameters_extra_flat_single_par=np.random.normal(0,0.05,nwalkers)\n # logging.info(zparameters_extra_flat_single_par.shape)\n if i == 0:\n zparameters_extra_flat = zparameters_extra_flat_single_par\n else:\n zparameters_extra_flat = np.column_stack(\n (zparameters_extra_flat, zparameters_extra_flat_single_par))\n # logging.info(zparameters_extra_flat.shape)\n\n except NameError:\n logging.info('NameError!')\n\n try:\n div_same = 10\n\n # detFrac always positive\n globalparameters_flat_0 = np.abs(\n np.random.normal(\n globalparameters_flatten[0],\n globalparameters_flatten_err[0],\n nwalkers * 20))\n globalparameters_flat_0[np.random.choice(len(globalparameters_flat_0),\n size=int(len(globalparameters_flat_0)/div_same),\n 
replace=False)] = globalparameters_flatten[0]\n globalparameters_flat_0 = np.concatenate(([globalparameters_flatten[0]],\n globalparameters_flat_0[np.all(\n (globalparameters_flat_0 >= 0.6,\n globalparameters_flat_0 <= 0.8),\n axis=0)][0:nwalkers - 1]))\n # strutFrac always positive\n globalparameters_flat_1_long = np.abs(\n np.random.normal(\n globalparameters_flatten[1],\n globalparameters_flatten_err[1],\n nwalkers * 200))\n globalparameters_flat_1 = globalparameters_flat_1_long\n globalparameters_flat_1[np.random.choice(len(globalparameters_flat_1),\n size=int(len(globalparameters_flat_1)/div_same),\n replace=False)] = globalparameters_flatten[1]\n globalparameters_flat_1 = np.concatenate(([globalparameters_flatten[1]],\n globalparameters_flat_1[np.all(\n (globalparameters_flat_1 >= 0.07,\n globalparameters_flat_1 <= 0.13),\n axis=0)][0:nwalkers - 1]))\n # dxFocal\n globalparameters_flat_2 = np.random.normal(\n globalparameters_flatten[2],\n globalparameters_flatten_err[2],\n nwalkers * 20)\n globalparameters_flat_2[np.random.choice(len(globalparameters_flat_2),\n size=int(len(globalparameters_flat_2)/div_same),\n replace=False)] = globalparameters_flatten[2]\n globalparameters_flat_2 = np.concatenate(([globalparameters_flatten[2]],\n globalparameters_flat_2[np.all(\n (globalparameters_flat_2 >= -0.4,\n globalparameters_flat_2 <= 0.4),\n axis=0)][0:nwalkers - 1]))\n # dyFocal\n globalparameters_flat_3 = np.random.normal(\n globalparameters_flatten[3],\n globalparameters_flatten_err[3],\n nwalkers * 20)\n globalparameters_flat_3[np.random.choice(len(globalparameters_flat_3),\n size=int(len(globalparameters_flat_3)/div_same),\n replace=False)] = globalparameters_flatten[3]\n globalparameters_flat_3 = np.concatenate(([globalparameters_flatten[3]],\n globalparameters_flat_3[np.all(\n (globalparameters_flat_3 >= -0.4,\n globalparameters_flat_3 <= 0.4),\n axis=0)][0:nwalkers - 1]))\n # slitFrac\n globalparameters_flat_4 = np.abs(\n np.random.normal(\n globalparameters_flatten[4],\n globalparameters_flatten_err[4],\n nwalkers * 20))\n # logging.info(globalparameters_flatten_err[4])\n globalparameters_flat_4[np.random.choice(len(globalparameters_flat_4),\n size=int(len(globalparameters_flat_4)/div_same),\n replace=False)] = globalparameters_flatten[4]\n globalparameters_flat_4 = np.concatenate(([globalparameters_flatten[4]],\n globalparameters_flat_4[np.all(\n (globalparameters_flat_4 >= 0.05,\n globalparameters_flat_4 <= 0.09),\n axis=0)][0:nwalkers - 1]))\n # slitFrac_dy\n globalparameters_flat_5 = np.abs(\n np.random.normal(\n globalparameters_flatten[5],\n globalparameters_flatten_err[5],\n nwalkers * 20))\n globalparameters_flat_5[np.random.choice(len(globalparameters_flat_5),\n size=int(len(globalparameters_flat_5)/div_same),\n replace=False)] = globalparameters_flatten[5]\n globalparameters_flat_5 = np.concatenate(([globalparameters_flatten[5]],\n globalparameters_flat_5[np.all(\n (globalparameters_flat_5 >= -0.5,\n globalparameters_flat_5 <= 0.5),\n axis=0)][0:nwalkers - 1]))\n # wide_0\n globalparameters_flat_6 = np.abs(\n np.random.normal(\n globalparameters_flatten[6],\n globalparameters_flatten_err[6],\n nwalkers * 20))\n globalparameters_flat_6[np.random.choice(len(globalparameters_flat_6),\n size=int(len(globalparameters_flat_6)/div_same),\n replace=False)] = globalparameters_flatten[6]\n globalparameters_flat_6 = np.concatenate(([globalparameters_flatten[6]],\n globalparameters_flat_6[np.all(\n (globalparameters_flat_6 >= 0,\n globalparameters_flat_6 <= 1),\n axis=0)][0:nwalkers 
- 1]))\n # wide_23\n globalparameters_flat_7 = np.random.normal(\n globalparameters_flatten[7],\n globalparameters_flatten_err[7],\n nwalkers * 20)\n globalparameters_flat_7[np.random.choice(len(globalparameters_flat_7),\n size=int(len(globalparameters_flat_7)/div_same),\n replace=False)] = globalparameters_flatten[7]\n globalparameters_flat_7 = np.concatenate(([globalparameters_flatten[7]],\n globalparameters_flat_7[np.all(\n (globalparameters_flat_7 >= 0.0,\n globalparameters_flat_7 <= 1),\n axis=0)][0:nwalkers - 1]))\n # wide_43\n globalparameters_flat_8 = np.abs(\n np.random.normal(\n globalparameters_flatten[8],\n globalparameters_flatten_err[8],\n nwalkers * 20))\n globalparameters_flat_8[np.random.choice(len(globalparameters_flat_8),\n size=int(len(globalparameters_flat_8)/div_same),\n replace=False)] = globalparameters_flatten[8]\n globalparameters_flat_8 = np.concatenate(([globalparameters_flatten[8]],\n globalparameters_flat_8[np.all(\n (globalparameters_flat_8 >= 0,\n globalparameters_flat_8 <= 1),\n axis=0)][0:nwalkers - 1]))\n # misalign\n globalparameters_flat_9 = np.abs(\n np.random.normal(\n globalparameters_flatten[9],\n globalparameters_flatten_err[9],\n nwalkers * 20))\n globalparameters_flat_9[np.random.choice(len(globalparameters_flat_9),\n size=int(len(globalparameters_flat_9)/div_same),\n replace=False)] = globalparameters_flatten[9]\n globalparameters_flat_9 = np.concatenate(([globalparameters_flatten[9]],\n globalparameters_flat_9[np.all(\n (globalparameters_flat_9 >= 0,\n globalparameters_flat_9 <= 12),\n axis=0)][0:nwalkers - 1]))\n # x_fiber\n globalparameters_flat_10 = np.random.normal(\n globalparameters_flatten[10],\n globalparameters_flatten_err[10],\n nwalkers * 20)\n globalparameters_flat_10[np.random.choice(len(globalparameters_flat_10),\n size=int(len(globalparameters_flat_10)/div_same),\n replace=False)] = globalparameters_flatten[10]\n globalparameters_flat_10 = np.concatenate(([globalparameters_flatten[10]],\n globalparameters_flat_10[np.all(\n (globalparameters_flat_10 >= -0.4,\n globalparameters_flat_10 <= 0.4),\n axis=0)][0:nwalkers - 1]))\n # y_fiber\n globalparameters_flat_11 = np.random.normal(\n globalparameters_flatten[11],\n globalparameters_flatten_err[11],\n nwalkers * 20)\n globalparameters_flat_11[np.random.choice(len(globalparameters_flat_11),\n size=int(len(globalparameters_flat_11)/div_same),\n replace=False)] = globalparameters_flatten[11]\n globalparameters_flat_11 = np.concatenate(([globalparameters_flatten[11]],\n globalparameters_flat_11[np.all(\n (globalparameters_flat_11 >= -0.4,\n globalparameters_flat_11 <= 0.4),\n axis=0)][0:nwalkers - 1]))\n\n # effective_radius_illumination\n globalparameters_flat_12 = np.random.normal(\n globalparameters_flatten[12],\n globalparameters_flatten_err[12],\n nwalkers * 20)\n globalparameters_flat_12[np.random.choice(len(globalparameters_flat_12),\n size=int(len(globalparameters_flat_12)/div_same),\n replace=False)] = globalparameters_flatten[12]\n globalparameters_flat_12 = np.concatenate(([globalparameters_flatten[12]],\n globalparameters_flat_12[np.all(\n (globalparameters_flat_12 >= 0.7,\n globalparameters_flat_12 <= 1.0),\n axis=0)][0:nwalkers - 1]))\n\n if globalparameters_flatten[13] < 0.01:\n globalparameters_flatten[13] = 0.01\n # frd_sigma\n globalparameters_flat_13 = np.random.normal(\n globalparameters_flatten[13],\n globalparameters_flatten_err[13],\n nwalkers * 20)\n globalparameters_flat_13[np.random.choice(len(globalparameters_flat_13),\n 
size=int(len(globalparameters_flat_13)/div_same),\n replace=False)] = globalparameters_flatten[13]\n globalparameters_flat_13 = np.concatenate(([globalparameters_flatten[13]],\n globalparameters_flat_13[np.all(\n (globalparameters_flat_13 >= 0.01,\n globalparameters_flat_13 <= 0.4),\n axis=0)][0:nwalkers - 1]))\n\n # frd_lorentz_factor\n globalparameters_flat_14 = np.random.normal(\n globalparameters_flatten[14],\n globalparameters_flatten_err[14],\n nwalkers * 20)\n globalparameters_flat_14[np.random.choice(len(globalparameters_flat_14),\n size=int(len(globalparameters_flat_14)/div_same),\n replace=False)] = globalparameters_flatten[14]\n globalparameters_flat_14 = np.concatenate(([globalparameters_flatten[14]],\n globalparameters_flat_14[np.all(\n (globalparameters_flat_14 >= 0.01,\n globalparameters_flat_14 <= 1),\n axis=0)][0:nwalkers - 1]))\n\n # det_vert\n globalparameters_flat_15 = np.random.normal(\n globalparameters_flatten[15],\n globalparameters_flatten_err[15],\n nwalkers * 20)\n globalparameters_flat_15[np.random.choice(len(globalparameters_flat_15),\n size=int(len(globalparameters_flat_15)/div_same),\n replace=False)] = globalparameters_flatten[15]\n globalparameters_flat_15 = np.concatenate(([globalparameters_flatten[15]],\n globalparameters_flat_15[np.all(\n (globalparameters_flat_15 >= 0.85,\n globalparameters_flat_15 <= 1.15),\n axis=0)][0:nwalkers - 1]))\n\n # slitHolder_frac_dx\n globalparameters_flat_16 = np.random.normal(\n globalparameters_flatten[16],\n globalparameters_flatten_err[16],\n nwalkers * 20)\n globalparameters_flat_16[np.random.choice(len(globalparameters_flat_16),\n size=int(len(globalparameters_flat_16)/div_same),\n replace=False)] = globalparameters_flatten[16]\n globalparameters_flat_16 = np.concatenate(([globalparameters_flatten[16]],\n globalparameters_flat_16[np.all(\n (globalparameters_flat_16 >= -0.8,\n globalparameters_flat_16 <= 0.8),\n axis=0)][0:nwalkers - 1]))\n\n # grating lines\n globalparameters_flat_17 = np.random.normal(\n globalparameters_flatten[17],\n globalparameters_flatten_err[17],\n nwalkers * 20)\n globalparameters_flat_17[np.random.choice(len(globalparameters_flat_17),\n size=int(len(globalparameters_flat_17)/div_same),\n replace=False)] = globalparameters_flatten[17]\n globalparameters_flat_17 = np.concatenate(([globalparameters_flatten[17]],\n globalparameters_flat_17[np.all(\n (globalparameters_flat_17 >= 1200,\n globalparameters_flat_17 <= 120000),\n axis=0)][0:nwalkers - 1]))\n\n # scattering_slope\n globalparameters_flat_18 = np.random.normal(\n globalparameters_flatten[18],\n globalparameters_flatten_err[18],\n nwalkers * 20)\n globalparameters_flat_18[np.random.choice(len(globalparameters_flat_18),\n size=int(len(globalparameters_flat_18)/div_same),\n replace=False)] = globalparameters_flatten[18]\n globalparameters_flat_18 = np.concatenate(([globalparameters_flatten[18]],\n globalparameters_flat_18[np.all(\n (globalparameters_flat_18 >= 1.5,\n globalparameters_flat_18 <= 3.0),\n axis=0)][0:nwalkers - 1]))\n # scattering_amplitude\n globalparameters_flat_19 = np.random.normal(\n globalparameters_flatten[19],\n globalparameters_flatten_err[19],\n nwalkers * 20)\n globalparameters_flat_19[np.random.choice(len(globalparameters_flat_19),\n size=int(len(globalparameters_flat_19)/div_same),\n replace=False)] = globalparameters_flatten[19]\n globalparameters_flat_19 = np.concatenate(([globalparameters_flatten[19]],\n globalparameters_flat_19[np.all(\n (globalparameters_flat_19 >= 0.0,\n globalparameters_flat_19 <= 0.4),\n 
axis=0)][0:nwalkers - 1]))\n # pixel_effect\n globalparameters_flat_20 = np.random.normal(\n globalparameters_flatten[20],\n globalparameters_flatten_err[20],\n nwalkers * 20)\n globalparameters_flat_20[np.random.choice(len(globalparameters_flat_20),\n size=int(len(globalparameters_flat_20)/div_same),\n replace=False)] = globalparameters_flatten[20]\n globalparameters_flat_20 = np.concatenate(([globalparameters_flatten[20]],\n globalparameters_flat_20[np.all(\n (globalparameters_flat_20 >= 0.15,\n globalparameters_flat_20 <= 0.8),\n axis=0)][0:nwalkers - 1]))\n\n # fiber_r\n if globalparameters_flatten[21] < 1.74:\n globalparameters_flatten[21] = 1.8\n\n globalparameters_flat_21 = np.random.normal(\n globalparameters_flatten[21],\n globalparameters_flatten_err[21],\n nwalkers * 20)\n globalparameters_flat_21[np.random.choice(len(globalparameters_flat_21),\n size=int(len(globalparameters_flat_21)/div_same),\n replace=False)] = globalparameters_flatten[21]\n globalparameters_flat_21 = np.concatenate(([globalparameters_flatten[21]],\n globalparameters_flat_21[np.all(\n (globalparameters_flat_21 >= 1.74,\n globalparameters_flat_21 <= 1.98),\n axis=0)][0:nwalkers - 1]))\n\n if len(globalparameters_flatten) == 23:\n # flux\n globalparameters_flat_22 = np.random.normal(\n globalparameters_flatten[22], globalparameters_flatten_err[22], nwalkers * 20)\n globalparameters_flat_22 = np.concatenate(([globalparameters_flatten[22]],\n globalparameters_flat_22[np.all(\n (globalparameters_flat_22 >= 0.98,\n globalparameters_flat_22 <= 1.02),\n axis=0)][0:nwalkers - 1]))\n else:\n pass\n\n # uncomment in order to troubleshoot and show many parameters generated for each parameter\n \"\"\"\n for i in [globalparameters_flat_0,globalparameters_flat_1,globalparameters_flat_2,\n globalparameters_flat_3,\n globalparameters_flat_4,globalparameters_flat_5,\n globalparameters_flat_6,globalparameters_flat_7,\n globalparameters_flat_8,globalparameters_flat_9,\n globalparameters_flat_10,\n globalparameters_flat_11,globalparameters_flat_12,\n globalparameters_flat_13,\n globalparameters_flat_14,globalparameters_flat_15,\n globalparameters_flat_16,\n globalparameters_flat_17,globalparameters_flat_18,\n globalparameters_flat_19,\n globalparameters_flat_20,globalparameters_flat_21,\n globalparameters_flat_22]:\n logging.info(str(i[0])+': '+str(len(i)))\n \"\"\"\n if pupil_parameters is None:\n if len(globalparameters_flatten) == 23:\n # logging.info('considering globalparameters_flatten 23 ')\n # logging.info(globalparameters_flat_0.shape)\n # logging.info(globalparameters_flat_3.shape)\n # logging.info(globalparameters_flat_6.shape)\n # logging.info(globalparameters_flat_9.shape)\n # logging.info(globalparameters_flat_12.shape)\n # logging.info(globalparameters_flat_15.shape)\n # logging.info(globalparameters_flat_18.shape)\n # logging.info(globalparameters_flat_21.shape)\n # logging.info(globalparameters_flat_22.shape)\n globalparameters_flat = np.column_stack(\n (globalparameters_flat_0,\n globalparameters_flat_1,\n globalparameters_flat_2,\n globalparameters_flat_3,\n globalparameters_flat_4,\n globalparameters_flat_5,\n globalparameters_flat_6,\n globalparameters_flat_7,\n globalparameters_flat_8,\n globalparameters_flat_9,\n globalparameters_flat_10,\n globalparameters_flat_11,\n globalparameters_flat_12,\n globalparameters_flat_13,\n globalparameters_flat_14,\n globalparameters_flat_15,\n globalparameters_flat_16,\n globalparameters_flat_17,\n globalparameters_flat_18,\n globalparameters_flat_19,\n 
globalparameters_flat_20,\n globalparameters_flat_21,\n globalparameters_flat_22))\n else:\n logging.info('not considering globalparameters_flatten 23 !!! ')\n globalparameters_flat = np.column_stack(\n (globalparameters_flat_0,\n globalparameters_flat_1,\n globalparameters_flat_2,\n globalparameters_flat_3,\n globalparameters_flat_4,\n globalparameters_flat_5,\n globalparameters_flat_6,\n globalparameters_flat_7,\n globalparameters_flat_8,\n globalparameters_flat_9,\n globalparameters_flat_10,\n globalparameters_flat_11,\n globalparameters_flat_12,\n globalparameters_flat_13,\n globalparameters_flat_14,\n globalparameters_flat_15,\n globalparameters_flat_16,\n globalparameters_flat_17,\n globalparameters_flat_18,\n globalparameters_flat_19,\n globalparameters_flat_20,\n globalparameters_flat_21))\n\n else:\n globalparameters_flat = np.column_stack(\n (globalparameters_flat_6,\n globalparameters_flat_7,\n globalparameters_flat_8,\n globalparameters_flat_9,\n globalparameters_flat_16,\n globalparameters_flat_17,\n globalparameters_flat_18,\n globalparameters_flat_19,\n globalparameters_flat_20,\n globalparameters_flat_21,\n globalparameters_flat_22))\n\n except NameError:\n logging.info(\"NameError\")\n\n # logging.info('globalparameters_flat.shape'+str(zparameters_flat.shape) )\n # logging.info('globalparameters_flat.shape'+str(globalparameters_flat.shape) )\n\n if zmax <= 22:\n allparameters = np.column_stack((zparameters_flat, globalparameters_flat))\n if zmax > 22:\n # logging.info('globalparameters_flat.shape'+str(zparameters_extra_flat.shape) )\n allparameters = np.column_stack((zparameters_flat, globalparameters_flat, zparameters_extra_flat))\n\n parInit = allparameters.reshape(nwalkers, number_of_par)\n\n # hm..... relic of some older code, needs cleaning\n if use_optPSF is not None:\n if zmax == 11:\n for i in range(1, 25):\n # for i in np.concatenate((range(1,7),range(8,25))):\n # for i in range(8,25):\n parInit[:, i] = np.full(len(parInit[:, i]), allparameters_proposal[i])\n else:\n for i in range(1, 25 + 11):\n # for i in np.concatenate((range(1,7),range(8,25))):\n # for i in range(8,25):\n parInit[:, i] = np.full(len(parInit[:, i]), allparameters_proposal[i])\n else:\n pass\n\n return parInit\n\n\ndef Ifun16Ne(lambdaV, lambda0, Ne):\n \"\"\"Construct Lorentizan scattering kernel\n Parameters\n ----------\n lambdaV: `float`\n wavelength at which compute the grating effect\n lambda0: `float`\n reference wavelength\n Ne: `int`\n number of effective grating lines of the spectrograph\n Returns\n ----------\n value_of_scatter: `float`\n strenth of the kernel at lambdaV wavelength\n \"\"\"\n return (lambda0 / (Ne * np.pi * np.sqrt(2)))**2 / \\\n ((lambdaV - lambda0)**2 + (lambda0 / (Ne * np.pi * np.sqrt(2)))**2)\n\n\ndef custom_fftconvolve(array1, array2):\n assert array1.shape == array2.shape\n\n fft_result = np.fft.fftshift(\n np.real(\n np.fft.irfft2(\n np.fft.rfft2(array1) * np.fft.rfft2(array2),\n s=np.array(\n array1.shape))))\n # ensure that the resulting shape is an odd nubmer, needed for fft convolutions later\n if array1.shape[0] % 2 == 0:\n # if the size of an array is even number\n fft_result = fft_result[:fft_result.shape[0] - 1, :fft_result.shape[1] - 1]\n else:\n # if the size of an array is an odd number\n fft_result = fft_result[:fft_result.shape[0] - 2, :fft_result.shape[1] - 2]\n fft_result = np.pad(fft_result, 1, 'constant', constant_values=0)\n\n return fft_result\n\n\n# Taken from galsim.Aperature() code\n\n # Some quick notes for Josh:\n # - Relation between 
real-space grid with size theta and pitch dtheta (dimensions of angle)\n # and corresponding (fast) Fourier grid with size 2*maxk and pitch stepk (dimensions of\n # inverse angle):\n # stepk = 2*pi/theta\n # maxk = pi/dtheta\n # - Relation between aperture of size L and pitch dL (dimensions of length, not angle!) and\n # (fast) Fourier grid:\n # dL = stepk * lambda / (2 * pi)\n # L = maxk * lambda / pi\n # - Implies relation between aperture grid and real-space grid:\n # dL = lambda/theta\n # L = lambda/dtheta\ndef stepK(pupil_plane_scale, lam, scale_unit=galsim.arcsec):\n \"\"\"Return the Fourier grid spacing for this aperture at given wavelength.\n\n @param lam Wavelength in nanometers.\n @param scale_unit Inverse units in which to return result [default: galsim.arcsec]\n @returns Fourier grid spacing.\n \"\"\"\n return 2 * np.pi * pupil_plane_scale / (lam * 1e-9) * scale_unit / galsim.radians\n\n\ndef maxK(pupil_plane_size, lam, scale_unit=galsim.arcsec):\n \"\"\"Return the Fourier grid half-size for this aperture at given wavelength.\n\n @param lam Wavelength in nanometers.\n @param scale_unit Inverse units in which to return result [default: galsim.arcsec]\n @returns Fourier grid half-size.\n \"\"\"\n return np.pi * pupil_plane_size / (lam * 1e-9) * scale_unit / galsim.radians\n\n\ndef sky_scale(pupil_plane_size, lam, scale_unit=galsim.arcsec):\n \"\"\"Return the image scale for this aperture at given wavelength\n\n @param lam Wavelength in nanometers.\n @param scale_unit Units in which to return result [default: galsim.arcsec]\n @returns Image scale.\n \"\"\"\n return (lam * 1e-9) / pupil_plane_size * galsim.radians / scale_unit\n\n\ndef sky_size(pupil_plane_scale, lam, scale_unit=galsim.arcsec):\n \"\"\"Return the image size for this aperture at given wavelength.\n @param lam Wavelength in nanometers.\n @param scale_unit Units in which to return result [default: galsim.arcsec]\n @returns Image size.\n \"\"\"\n return (lam * 1e-9) / pupil_plane_scale * galsim.radians / scale_unit\n\n\ndef remove_pupil_parameters_from_all_parameters(parameters):\n lenpar = len(parameters)\n return np.concatenate((parameters[:lenpar - 23],\n parameters[lenpar - 17:lenpar - 13],\n parameters[lenpar - 7:]))\n\n\ndef add_pupil_parameters_to_all_parameters(parameters, pupil_parameters):\n lenpar = len(parameters)\n return np.concatenate((parameters[:lenpar - 11],\n pupil_parameters[:6],\n parameters[lenpar - 11:lenpar - 7],\n pupil_parameters[6:],\n parameters[lenpar - 7:]),\n axis=0)\n\n\n# taken https://gist.github.com/shoyer/c0f1ddf409667650a076c058f9a17276\n# also here https://github.com/scikit-image/scikit-image/issues/2827\n\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ndef _reflect_breaks(size: int) -> np.ndarray:\n \"\"\"Calculate cell boundaries with reflecting boundary conditions.\"\"\"\n result = np.concatenate([[0], 0.5 + np.arange(size - 1), [size - 1]])\n assert len(result) == size + 1\n return result\n\n\ndef _interval_overlap(first_breaks: np.ndarray,\n 
second_breaks: np.ndarray) -> np.ndarray:\n \"\"\"Return the overlap distance between all pairs of intervals.\n\n Args:\n first_breaks: breaks between entries in the first set of intervals, with\n shape (N+1,). Must be a non-decreasing sequence.\n second_breaks: breaks between entries in the second set of intervals, with\n shape (M+1,). Must be a non-decreasing sequence.\n\n Returns:\n Array with shape (N, M) giving the size of the overlapping region between\n each pair of intervals.\n \"\"\"\n first_upper = first_breaks[1:]\n second_upper = second_breaks[1:]\n upper = np.minimum(first_upper[:, np.newaxis], second_upper[np.newaxis, :])\n\n first_lower = first_breaks[:-1]\n second_lower = second_breaks[:-1]\n lower = np.maximum(first_lower[:, np.newaxis], second_lower[np.newaxis, :])\n\n return np.maximum(upper - lower, 0)\n\n# @lru_cache()\n\n\ndef _resize_weights(\n old_size: int, new_size: int, reflect: bool = False) -> np.ndarray:\n \"\"\"Create a weight matrix for resizing with the local mean along an axis.\n\n Args:\n old_size: old size.\n new_size: new size.\n reflect: whether or not there are reflecting boundary conditions.\n\n Returns:\n NumPy array with shape (new_size, old_size). Rows sum to 1.\n \"\"\"\n if not reflect:\n old_breaks = np.linspace(0, old_size, num=old_size + 1, dtype=np.float32)\n new_breaks = np.linspace(0, old_size, num=new_size + 1, dtype=np.float32)\n else:\n old_breaks = _reflect_breaks(old_size)\n new_breaks = (old_size - 1) / (new_size - 1) * _reflect_breaks(new_size)\n\n weights = _interval_overlap(new_breaks, old_breaks)\n weights /= np.sum(weights, axis=1, keepdims=True)\n assert weights.shape == (new_size, old_size)\n return weights\n\n\ndef resize(array: np.ndarray,\n shape: Tuple[int, ...],\n reflect_axes: Iterable[int] = ()) -> np.ndarray:\n \"\"\"Resize an array with the local mean / bilinear scaling.\n\n Works for both upsampling and downsampling in a fashion equivalent to\n block_mean and zoom, but allows for resizing by non-integer multiples. 
Prefer\n block_mean and zoom when possible, as this implementation is probably slower.\n\n Args:\n array: array to resize.\n shape: shape of the resized array.\n reflect_axes: iterable of axis numbers with reflecting boundary conditions,\n mirrored over the center of the first and last cell.\n\n Returns:\n Array resized to shape.\n\n Raises:\n ValueError: if any values in reflect_axes fall outside the interval\n [-array.ndim, array.ndim).\n \"\"\"\n reflect_axes_set = set()\n for axis in reflect_axes:\n if not -array.ndim <= axis < array.ndim:\n raise ValueError('invalid axis: {}'.format(axis))\n reflect_axes_set.add(axis % array.ndim)\n\n output = array\n for axis, (old_size, new_size) in enumerate(zip(array.shape, shape)):\n reflect = axis in reflect_axes_set\n weights = _resize_weights(old_size, new_size, reflect=reflect)\n\n product = np.tensordot(output, weights, [[axis], [-1]])\n output = np.moveaxis(product, -1, axis)\n return output\n\n\ndef check_global_parameters(globalparameters, test_print=None, fit_for_flux=None):\n # When running big fits these are limits which ensure that the code does\n # not wander off in totally non physical region\n\n globalparameters_output = np.copy(globalparameters)\n\n # det frac\n if globalparameters[0] <= 0.6 or globalparameters[0] >= 0.8:\n logging.info('globalparameters[0] outside limits; value: '\n + str(globalparameters[0])) if test_print == 1 else False\n if globalparameters[0] <= 0.6:\n globalparameters_output[0] = 0.6\n if globalparameters[0] > 0.8:\n globalparameters_output[0] = 0.8\n\n # strut frac\n if globalparameters[1] < 0.07 or globalparameters[1] > 0.13:\n logging.info('globalparameters[1] outside limits') if test_print == 1 else False\n if globalparameters[1] <= 0.07:\n globalparameters_output[1] = 0.07\n if globalparameters[1] > 0.13:\n globalparameters_output[1] = 0.13\n\n # slit_frac < strut frac\n # if globalparameters[4]<globalparameters[1]:\n # logging.info('globalparameters[1] not smaller than 4 outside limits')\n # return -np.inf\n\n # dx Focal\n if globalparameters[2] < -0.4 or globalparameters[2] > 0.4:\n logging.info('globalparameters[2] outside limits') if test_print == 1 else False\n if globalparameters[2] < -0.4:\n globalparameters_output[2] = -0.4\n if globalparameters[2] > 0.4:\n globalparameters_output[2] = 0.4\n\n # dy Focal\n if globalparameters[3] > 0.4:\n logging.info('globalparameters[3] outside limits') if test_print == 1 else False\n globalparameters_output[3] = 0.4\n if globalparameters[3] < -0.4:\n logging.info('globalparameters[3] outside limits') if test_print == 1 else False\n globalparameters_output[3] = -0.4\n\n # slitFrac\n if globalparameters[4] < 0.05:\n logging.info('globalparameters[4] outside limits') if test_print == 1 else False\n globalparameters_output[4] = 0.05\n if globalparameters[4] > 0.09:\n logging.info('globalparameters[4] outside limits') if test_print == 1 else False\n globalparameters_output[4] = 0.09\n\n # slitFrac_dy\n if globalparameters[5] < -0.5:\n logging.info('globalparameters[5] outside limits') if test_print == 1 else False\n globalparameters_output[5] = -0.5\n if globalparameters[5] > 0.5:\n logging.info('globalparameters[5] outside limits') if test_print == 1 else False\n globalparameters_output[5] = +0.5\n\n # radiometricEffect / wide_0\n if globalparameters[6] < 0:\n logging.info('globalparameters[6] outside limits') if test_print == 1 else False\n globalparameters_output[6] = 0\n if globalparameters[6] > 1:\n logging.info('globalparameters[6] outside limits') if 
test_print == 1 else False\n globalparameters_output[6] = 1\n\n # radiometricExponent / wide_23\n if globalparameters[7] < 0:\n logging.info('globalparameters[7] outside limits') if test_print == 1 else False\n globalparameters_output[7] = 0\n # changed in v0.42\n if globalparameters[7] > 1:\n logging.info('globalparameters[7] outside limits') if test_print == 1 else False\n globalparameters_output[7] = 1\n\n # x_ilum /wide_43\n if globalparameters[8] < 0:\n logging.info('globalparameters[8] outside limits') if test_print == 1 else False\n globalparameters_output[8] = 0\n # changed in v0.42\n if globalparameters[8] > 1:\n logging.info('globalparameters[8] outside limits') if test_print == 1 else False\n globalparameters_output[8] = 1\n\n # y_ilum / misalign\n if globalparameters[9] < 0:\n logging.info('globalparameters[9] outside limits') if test_print == 1 else False\n globalparameters_output[9] = 0\n if globalparameters[9] > 12:\n logging.info('globalparameters[9] outside limits') if test_print == 1 else False\n globalparameters_output[9] = 12\n\n # x_fiber\n if globalparameters[10] < -0.4:\n logging.info('globalparameters[10] outside limits') if test_print == 1 else False\n globalparameters_output[10] = -0.4\n if globalparameters[10] > 0.4:\n logging.info('globalparameters[10] outside limits') if test_print == 1 else False\n globalparameters_output[10] = 0.4\n\n # y_fiber\n if globalparameters[11] < -0.4:\n logging.info('globalparameters[11] outside limits') if test_print == 1 else False\n globalparameters_output[11] = -0.4\n if globalparameters[11] > 0.4:\n logging.info('globalparameters[11] outside limits') if test_print == 1 else False\n globalparameters_output[11] = 0.4\n\n # effective_radius_illumination\n if globalparameters[12] < 0.7:\n logging.info('globalparameters[12] outside limits') if test_print == 1 else False\n globalparameters_output[12] = 0.7\n if globalparameters[12] > 1.0:\n logging.info('globalparameters[12] outside limits') if test_print == 1 else False\n globalparameters_output[12] = 1\n\n # frd_sigma\n if globalparameters[13] < 0.01:\n logging.info('globalparameters[13] outside limits') if test_print == 1 else False\n globalparameters_output[13] = 0.01\n if globalparameters[13] > .4:\n logging.info('globalparameters[13] outside limits') if test_print == 1 else False\n globalparameters_output[13] = 0.4\n\n # frd_lorentz_factor\n if globalparameters[14] < 0.01:\n logging.info('globalparameters[14] outside limits') if test_print == 1 else False\n globalparameters_output[14] = 0.01\n if globalparameters[14] > 1:\n logging.info('globalparameters[14] outside limits') if test_print == 1 else False\n globalparameters_output[14] = 1\n\n # det_vert\n if globalparameters[15] < 0.85:\n logging.info('globalparameters[15] outside limits') if test_print == 1 else False\n globalparameters_output[15] = 0.85\n if globalparameters[15] > 1.15:\n logging.info('globalparameters[15] outside limits') if test_print == 1 else False\n globalparameters_output[15] = 1.15\n\n # slitHolder_frac_dx\n if globalparameters[16] < -0.8:\n logging.info('globalparameters[16] outside limits') if test_print == 1 else False\n globalparameters_output[16] = -0.8\n if globalparameters[16] > 0.8:\n logging.info('globalparameters[16] outside limits') if test_print == 1 else False\n globalparameters_output[16] = 0.8\n\n # grating_lines\n if globalparameters[17] < 1200:\n logging.info('globalparameters[17] outside limits') if test_print == 1 else False\n globalparameters_output[17] = 1200\n if 
globalparameters[17] > 120000:\n logging.info('globalparameters[17] outside limits') if test_print == 1 else False\n globalparameters_output[17] = 120000\n\n # scattering_slope\n if globalparameters[18] < 1.5:\n logging.info('globalparameters[18] outside limits') if test_print == 1 else False\n globalparameters_output[18] = 1.5\n if globalparameters[18] > +3.0:\n logging.info('globalparameters[18] outside limits') if test_print == 1 else False\n globalparameters_output[18] = 3\n\n # scattering_amplitude\n if globalparameters[19] < 0:\n logging.info('globalparameters[19] outside limits') if test_print == 1 else False\n globalparameters_output[19] = 0\n if globalparameters[19] > +0.4:\n logging.info('globalparameters[19] outside limits') if test_print == 1 else False\n globalparameters_output[19] = 0.4\n\n # pixel_effect\n if globalparameters[20] < 0.15:\n logging.info('globalparameters[20] outside limits') if test_print == 1 else False\n globalparameters_output[20] = 0.15\n if globalparameters[20] > +0.8:\n logging.info('globalparameters[20] outside limits') if test_print == 1 else False\n globalparameters_output[20] = 0.8\n\n # fiber_r\n if globalparameters[21] < 1.74:\n logging.info('globalparameters[21] outside limits') if test_print == 1 else False\n globalparameters_output[21] = 1.74\n if globalparameters[21] > +1.98:\n logging.info('globalparameters[21] outside limits') if test_print == 1 else False\n globalparameters_output[21] = 1.98\n\n # flux\n if fit_for_flux:\n globalparameters_output[22] = 1\n else:\n if globalparameters[22] < 0.98:\n logging.info('globalparameters[22] outside limits') if test_print == 1 else False\n globalparameters_output[22] = 0.98\n if globalparameters[22] > 1.02:\n logging.info('globalparameters[22] outside limits') if test_print == 1 else False\n globalparameters_output[22] = 1.02\n\n return globalparameters_output\n\n\ndef move_parametrizations_from_2d_shape_to_1d_shape(allparameters_best_parametrization_shape_2d):\n \"\"\"\n change the linear parametrization array in 2d shape to parametrization array in 1d\n\n @param allparameters_best_parametrization_shape_2d linear parametrization, 2d array\n\n \"\"\"\n\n if allparameters_best_parametrization_shape_2d.shape[0] > 42:\n # if you are using above Zernike above 22\n # logging.info('we are creating new result with Zernike above 22')\n allparameters_best_parametrization_shape_1d = np.concatenate((\n allparameters_best_parametrization_shape_2d[:19].ravel(),\n allparameters_best_parametrization_shape_2d[19:19 + 23][:, 1],\n allparameters_best_parametrization_shape_2d[19 + 23:].ravel()))\n\n else:\n # logging.info('we are creating new result with Zernike at 22')\n allparameters_best_parametrization_shape_1d = np.concatenate((\n allparameters_best_parametrization_shape_2d[:19].ravel(),\n allparameters_best_parametrization_shape_2d[19:-1][:, 1]))\n\n return allparameters_best_parametrization_shape_1d # noqa: W292"
] |
[
[
"numpy.dot",
"numpy.polyfit",
"scipy.linalg.svd",
"numpy.minimum",
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan",
"numpy.nan_to_num",
"numpy.fft.fftshift",
"numpy.concatenate",
"numpy.seterr",
"numpy.max",
"numpy.round",
"numpy.zeros_like",
"scipy.ndimage.filters.gaussian_filter",
"numpy.mean",
"numpy.moveaxis",
"numpy.random.randn",
"numpy.all",
"numpy.exp",
"numpy.where",
"numpy.hypot",
"numpy.ones_like",
"numpy.pad",
"numpy.clip",
"numpy.arange",
"numpy.matmul",
"numpy.save",
"numpy.sin",
"numpy.ceil",
"numpy.copy",
"numpy.size",
"numpy.std",
"numpy.tensordot",
"numpy.insert",
"numpy.column_stack",
"numpy.zeros",
"numpy.log",
"scipy.signal.fftconvolve",
"numpy.min",
"numpy.linalg.inv",
"numpy.isnan",
"numpy.median",
"numpy.modf",
"numpy.tan",
"numpy.log10",
"numpy.identity",
"numpy.fft.rfft2",
"numpy.floor",
"numpy.transpose",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.maximum",
"numpy.abs",
"numpy.random.seed",
"numpy.set_printoptions",
"numpy.cos",
"numpy.ones",
"numpy.random.normal",
"numpy.mod",
"numpy.vstack"
]
] |
NathalieGou/mne-nirs
|
[
"387e34bef41d7cb4103d0901070c1e54e7f0707e"
] |
[
"mne_nirs/visualisation/tests/test_visualisation.py"
] |
[
"# Authors: Robert Luke <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\nimport mne_nirs\nimport numpy as np\nfrom mne.utils import (requires_pysurfer, traits_test)\nfrom mne_nirs.experimental_design.tests.test_experimental_design import \\\n _load_dataset\nfrom mne_nirs.experimental_design import make_first_level_design_matrix\nfrom mne_nirs.statistics import run_GLM\nfrom mne_nirs.visualisation import plot_glm_topo\n\nmne.viz.set_3d_backend('pyvista')\n\n\n@requires_pysurfer\n@traits_test\ndef test_plot_nirs_source_detector():\n data_path = mne.datasets.testing.data_path()\n subjects_dir = mne.datasets.sample.data_path() + '/subjects'\n raw = mne.io.read_raw_nirx(data_path + '/NIRx/nirx_15_2_recording_w_short')\n\n mne_nirs.visualisation.plot_nirs_source_detector(\n np.random.randn(len(raw.ch_names)),\n raw.info, show_axes=True,\n subject='fsaverage',\n trans='fsaverage',\n surfaces=['brain'],\n fnirs=False,\n subjects_dir=subjects_dir,\n verbose=True)\n\n mne_nirs.visualisation.plot_nirs_source_detector(\n 100.0 + np.random.randn(len(raw.ch_names)),\n raw.info, show_axes=True,\n subject='fsaverage',\n trans='fsaverage',\n surfaces=['brain'],\n fnirs=False,\n subjects_dir=subjects_dir,\n verbose=True)\n\n mne_nirs.visualisation.plot_nirs_source_detector(\n 100.0 + np.random.randn(len(raw.ch_names)),\n raw.info, show_axes=True,\n subject='fsaverage',\n trans='fsaverage',\n surfaces=['brain'],\n fnirs=False, cmap='viridis',\n subjects_dir=subjects_dir,\n verbose=True)\n\n\ndef test_run_plot_GLM_topo():\n raw_intensity = _load_dataset()\n raw_intensity.crop(450, 600) # Keep the test fast\n\n design_matrix = make_first_level_design_matrix(raw_intensity,\n drift_order=1,\n drift_model='polynomial')\n raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)\n raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)\n glm_estimates = run_GLM(raw_haemo, design_matrix)\n fig = plot_glm_topo(raw_haemo, glm_estimates, design_matrix)\n # 5 conditions (A,B,C,Drift,Constant) * two chroma + 2xcolorbar\n assert len(fig.axes) == 12\n\n fig = plot_glm_topo(raw_haemo, glm_estimates, design_matrix,\n requested_conditions=['A', 'B'])\n # Two conditions * two chroma + 2xcolorbar\n assert len(fig.axes) == 6\n\n\ndef test_run_plot_GLM_contrast_topo():\n raw_intensity = _load_dataset()\n raw_intensity.crop(450, 600) # Keep the test fast\n\n design_matrix = make_first_level_design_matrix(raw_intensity,\n drift_order=1,\n drift_model='polynomial')\n raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)\n raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)\n glm_est = run_GLM(raw_haemo, design_matrix)\n contrast_matrix = np.eye(design_matrix.shape[1])\n basic_conts = dict([(column, contrast_matrix[i])\n for i, column in enumerate(design_matrix.columns)])\n contrast_LvR = basic_conts['A'] - basic_conts['B']\n contrast = mne_nirs.statistics.compute_contrast(glm_est, contrast_LvR)\n fig = mne_nirs.visualisation.plot_glm_contrast_topo(raw_haemo, contrast)\n assert len(fig.axes) == 3\n\n\ndef test_run_plot_GLM_contrast_topo_single_chroma():\n raw_intensity = _load_dataset()\n raw_intensity.crop(450, 600) # Keep the test fast\n\n design_matrix = make_first_level_design_matrix(raw_intensity,\n drift_order=1,\n drift_model='polynomial')\n raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)\n raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)\n raw_haemo = raw_haemo.pick(picks='hbo')\n glm_est = run_GLM(raw_haemo, design_matrix)\n contrast_matrix = 
np.eye(design_matrix.shape[1])\n basic_conts = dict([(column, contrast_matrix[i])\n for i, column in enumerate(design_matrix.columns)])\n contrast_LvR = basic_conts['A'] - basic_conts['B']\n contrast = mne_nirs.statistics.compute_contrast(glm_est, contrast_LvR)\n fig = mne_nirs.visualisation.plot_glm_contrast_topo(raw_haemo, contrast)\n assert len(fig.axes) == 2\n"
] |
[
[
"numpy.eye"
]
] |
inertialsense/inertial-sense-sdk
|
[
"68893b0207d12d21420744706578f90ef215be4a"
] |
[
"python/pylib/newISDataAnalytics.py"
] |
[
"'''\nCreated on Feb 17, 2017\n\n'''\n# from numbers import Number\nimport numpy as np\nimport os\nimport sys\n# import ctypes as ct\nimport math\nimport pylib.ISToolsDataSorted as itd\nimport subprocess as subprocess\nimport pdb\n\nfrom pylab import plt\nfrom scipy.interpolate import interp1d\nfrom pylib.pose import norm, qlog, qexp, lla2ned, quat2eulerArray, meanOfQuatArray, qboxminus, qboxplus, qmult, qinv\nimport yaml\n\nRAD2DEG = 180 / np.pi\nDEG2RAD = np.pi / 180\n\n\ndef checkRawDataDrop(log):\n for dev in log.devices:\n if 'debugArray' in dev.data.keys() and 'GPS1Raw' in dev.data.keys():\n dbg = dev.data['debugArray']['i']\n counts = dev.data['GPS1Raw']['count'][dev.data['GPS1Raw']['type'] == 1]\n total_obs_in_log = np.sum(counts)\n obs_at_start = dbg[0, :]\n obs_at_end = dbg[-1, :]\n print(\"===================== Raw GPS Message Statistics ======================\")\n print(\"uINS - %d\", dev.serialNumber)\n print(\"recorded observations: %d, \" % total_obs_in_log)\n print(\"observations reported by firmware %d, \" % obs_at_end[3])\n\ndef checkTempOfLOF(log):\n for dev in log.devices:\n if 'gpsRtkNav' not in dev.data.keys() or 'sysParams' not in dev.data.keys():\n continue\n fix_status = dev.data['gpsRtkNav']['status'] & 0xFF00 == 0xC00\n time_of_lost_fix = dev.data['gpsRtkNav']['towMs'][np.argmax(~fix_status)]\n imu_temp_at_time_of_lost_fix = dev.data['sysParams']['imuTemp'][np.argmax(dev.data['sysParams']['towMs'] > time_of_lost_fix)]\n baro_temp_at_time_of_lost_fix = dev.data['sysParams']['baroTemp'][np.argmax(dev.data['sysParams']['towMs'] > time_of_lost_fix)]\n time_of_regain_fix = dev.data['gpsRtkNav']['towMs'][len(fix_status) - np.argmax(~fix_status[::-1]) - 1]\n imu_temp_at_time_of_regain_fix = dev.data['sysParams']['imuTemp'][np.argmax(dev.data['sysParams']['towMs'] > time_of_regain_fix)]\n baro_temp_at_time_of_regain_fix = dev.data['sysParams']['baroTemp'][np.argmax(dev.data['sysParams']['towMs'] > time_of_regain_fix)]\n print (\"dev: {0} lost: IMU = {1}, baro = {2}, regain: IMU={3}, baro={4}\".format(dev.data['devInfo']['serialNumber'][0], imu_temp_at_time_of_lost_fix,\n baro_temp_at_time_of_lost_fix, imu_temp_at_time_of_regain_fix, baro_temp_at_time_of_regain_fix))\n\ndef calcRMS(log, directory, subdir):\n file = open(\"/home/superjax/Documents/inertial_sense/config.yaml\", 'r')\n config = yaml.load(file)\n directory = config[\"directory\"]\n serials = config['serials']\n\n numDev = len(log.devices)\n debug = True\n np.set_printoptions(linewidth=200)\n averageRMS = []\n compassing = False\n navMode = (log.devices[0].data['ins2']['iStatus'] & 0x1000)[-1]\n if numDev > 1:\n\n print(\"\\nComputing RMS Accuracies: (%d devices)\" % (numDev))\n\n # Build a 3D array of the data. 
idx 0 = Device, idx 1 = t, idx 2 = [t, lla, uvw, log(q)]\n data = [np.hstack((log.devices[i].data['ins2']['tow'][:,None],\n log.devices[i].data['ins2']['lla'],\n log.devices[i].data['ins2']['uvw'],\n log.devices[i].data['ins2']['q'])) for i in range(numDev)]\n\n # Make sure that the time stamps are realistic\n for dev in range(numDev):\n if (np.diff(data[dev][:,0]) > 10.0).any():\n print(\"large gaps in data for dev\", dev, \"chopping off data before gap\".format(dev))\n idx = np.argmax(np.diff(data[dev][:,0])) + 1\n data[dev] = data[dev][idx:,:]\n\n min_time = max([np.min(data[i][:,0]) for i in range(numDev)])\n max_time = min([np.max(data[i][:,0]) for i in range(numDev)])\n\n\n # If we are in compassing mode, then only calculate RMS after all devices have fix\n if log.devices[0].data['flashCfg']['RTKCfgBits'][-1] == 8:\n compassing = True\n time_of_fix_ms = [dev.data['gps1RtkCmpRel']['timeOfWeekMs'][np.argmax(dev.data['gps1RtkCmpRel']['arRatio'] > 3.0)] / 1000.0 for dev in log.devices]\n # print time_of_fix_ms\n min_time = max(time_of_fix_ms)\n\n\n # only take the second half of the data\n min_time = max_time - (max_time - min_time)/2.0\n\n # Resample at a steady 100 Hz\n dt = 0.01\n t = np.arange(1.0, max_time - min_time - 1.0, dt)\n for i in range(numDev):\n # Chop off extra data at beginning and end\n data[i] = data[i][data[i][:, 0] > min_time]\n data[i] = data[i][data[i][:, 0] < max_time]\n\n # Chop off the min time so everything is wrt to start\n data[i][:,0] -= min_time\n\n # Interpolate data so that it has all the same timestamps\n fi = interp1d(data[i][:,0], data[i][:,1:].T, kind='cubic', fill_value='extrapolate', bounds_error=False)\n data[i] = np.hstack((t[:,None], fi(t).T))\n\n # Normalize Quaternions\n data[i][:,7:] /= norm(data[i][:,7:], axis=1)[:,None]\n\n # Make a big 3D numpy array we can work with [dev, sample, data]\n data = np.array(data)\n\n\n # Convert lla to ned using first device lla at center of data as reference\n refLla = data[0, int(round(len(t) / 2.0)), 1:4].copy()\n for i in range(numDev):\n data[i, :, 1:4] = lla2ned(refLla, data[i, :, 1:4])\n\n # Find Mean Data\n means = np.empty((len(data[0]), 10))\n means[:,:6] = np.mean(data[:,:,1:7], axis=0) # calculate mean position and velocity across devices\n means[:,6:] = meanOfQuatArray(data[:,:,7:].transpose((1,0,2))) # Calculate mean attitude of all devices at each timestep\n\n # calculate the attitude error for each device\n att_error = np.array([qboxminus(data[dev,:, 7:], means[:, 6:]) for dev in range(numDev)])\n # Calculate the Mounting Bias for all devices (assume the mounting bias is the mean of the attitude error)\n mount_bias = np.mean(att_error, axis=1)\n if compassing:\n # When in compassing, assume all units are sharing the same GPS antennas and should therefore have\n # no mounting bias in heading\n mount_bias[:,2] = 0\n\n # Adjust all attitude errors to the mean by the mounting bias\n # TODO: Talk to Walt about the mount bias - because this probably includes more biases than just the mount bias\n att_error -= mount_bias[:,None,:]\n\n if debug:\n colors = ['r', 'g', 'b', 'm']\n plt.figure()\n plt.subplot(3,1,1) # Position\n plt.title(\"position error\")\n for m in range(3):\n for n in range(numDev):\n plt.plot(data[n,:,0], data[n, :, m+1], color = colors[m])\n plt.plot(data[0,:,0], means[:, m], linewidth=2, color = colors[m])\n plt.subplot(3,1,2)\n plt.title(\"velocity error\")\n for m in range(3):\n for n in range(numDev):\n plt.plot(data[n,:,0], data[n, :, m+4], color = colors[m] )\n 
plt.plot(data[0,:,0], means[:, m+3], linewidth=2, color = colors[m])\n plt.subplot(3,1,3)\n plt.title(\"attitude\")\n for m in range(4):\n for n in range(numDev):\n plt.plot(data[n,:,0], data[n, :, m+7], color = colors[m])\n plt.plot(data[0,:,0], means[:, m+6], linewidth=2, color = colors[m])\n\n plt.figure()\n for m in range(3):\n plt.subplot(3, 1, m +1)\n for n in range(numDev):\n plt.plot(att_error[n, :, m])\n plt.show()\n\n # RMS = sqrt ( 1/N sum(e^2) )\n RMS = np.empty((numDev, 9))\n # Calculate RMS for position and velocity\n RMS[:,:6] = np.sqrt(np.mean(np.square(data[:, :, 1:7] - means[:,0:6]), axis=1))\n # Calculate RMS for attitude\n RMS[:,6:] = np.sqrt(np.mean(np.square(att_error[:, :, :]), axis=1))\n\n # Average RMS across devices\n averageRMS = np.mean(RMS, axis=0)\n\n print(\"average RMS = \", averageRMS)\n\n # Convert Attitude Error To Euler Angles\n RMS_euler = RMS[:,6:] # quat2eulerArray(qexp(RMS[:,6:]))\n averageRMS_euler = averageRMS[6:] #quat2eulerArray(qexp(averageRMS[None,6:]))[0]\n mount_bias_euler = mount_bias #quat2eulerArray(qexp(mount_bias))\n\n\n # Below is creating the RMS report\n thresholds = np.array([0.2, 0.2, 0.2, # LLA\n 0.2, 0.2, 0.2, # UVW\n 0.1, 0.1, 2.0]) # ATT (rpy) - (deg)\n if navMode or compassing:\n thresholds[8] = 0.3 # Higher heading accuracy\n else:\n thresholds[:6] = np.inf\n\n thresholds[6:] *= DEG2RAD # convert degrees threshold to radians\n\n\n\n specRatio = averageRMS / thresholds\n\n filename = os.path.join(directory, 'RMS_report_new.txt');\n f = open(filename, 'w')\n f.write('***** Performance Analysis Report - %s *****\\n' % (subdir))\n f.write('\\n')\n f.write('Directory: %s\\n' % (directory))\n mode = \"AHRS\"\n if navMode: mode = \"NAV\"\n if compassing: mode = \"DUAL GNSS\"\n f.write(\"\\n\")\n\n # Print Table of RMS accuracies\n line = 'Device '\n if navMode:\n f.write(\n '--------------------------------------------------- RMS Accuracy -------------------------------------------\\n')\n line = line + 'UVW[ (m/s) (m/s) (m/s) ], NED[ (m) (m) (m) ],'\n else: # AHRS mode\n f.write('-------------- RMS Accuracy --------------\\n')\n line = line + ' Att [ (deg) (deg) (deg) ]\\n'\n f.write(line)\n\n for n in range(0, numDev):\n devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])\n line = '%2d SN%d ' % (n, devInfo.v['serialNumber'][-1])\n if navMode:\n line = line + '[ %6.4f %6.4f %6.4f ], ' % ( RMS[n, 3], RMS[n, 4], RMS[n, 5])\n line = line + '[ %6.4f %6.4f %6.4f ], ' % ( RMS[n, 0], RMS[n, 1], RMS[n, 2])\n line = line + '[ %6.4f %6.4f %6.4f ]\\n' % (RMS_euler[n, 0] * RAD2DEG, RMS_euler[n, 1] * RAD2DEG, RMS_euler[n, 2] * RAD2DEG)\n f.write(line)\n\n line = 'AVERAGE: '\n if navMode:\n f.write('------------------------------------------------------------------------------------------------------------\\n')\n line = line + '[%7.4f %7.4f %7.4f ], ' % (averageRMS[3], averageRMS[4], averageRMS[5])\n line = line + '[%7.4f %7.4f %7.4f ], ' % (averageRMS[0], averageRMS[1], averageRMS[2])\n else: # AHRS mode\n f.write('------------------------------------------\\n')\n line = line + '[%7.4f %7.4f %7.4f ]\\n' % (averageRMS_euler[0] * RAD2DEG, averageRMS_euler[1] * RAD2DEG, averageRMS_euler[2] * RAD2DEG)\n f.write(line)\n\n line = 'THRESHOLD: '\n if navMode:\n line = line + '[%7.4f %7.4f %7.4f ], ' % (thresholds[3], thresholds[4], thresholds[5])\n line = line + '[%7.4f %7.4f %7.4f ], ' % (thresholds[0], thresholds[1], thresholds[2])\n line = line + '[%7.4f %7.4f %7.4f ]\\n' % (thresholds[6] * RAD2DEG, thresholds[7] * RAD2DEG, thresholds[8] * 
RAD2DEG)\n f.write(line)\n\n line = 'RATIO: '\n if navMode:\n f.write('------------------------------------------------------------------------------------------------------------\\n')\n line = line + '[%7.4f %7.4f %7.4f ], ' % (specRatio[3], specRatio[4], specRatio[5])\n line = line + '[%7.4f %7.4f %7.4f ], ' % (specRatio[0], specRatio[1], specRatio[2])\n else: # AHRS mode\n f.write('------------------------------------------\\n')\n line = line + '[%7.4f %7.4f %7.4f ]\\n' % (specRatio[6], specRatio[7], specRatio[8])\n f.write(line)\n\n def pass_fail(ratio): return 'FAIL' if ratio > 1.0 else 'PASS'\n\n line = 'PASS/FAIL: '\n if navMode:\n line = line + '[ %s %s %s ], ' % (pass_fail(specRatio[3]),pass_fail(specRatio[4]),pass_fail(specRatio[5])) # LLA\n line = line + '[ %s %s %s ], ' % (pass_fail(specRatio[0]),pass_fail(specRatio[1]),pass_fail(specRatio[2])) # UVW\n line = line + '[ %s %s %s ]\\n' % (pass_fail(specRatio[6]),pass_fail(specRatio[7]),pass_fail(specRatio[8])) # ATT\n f.write(line)\n\n if navMode:\n f.write(' ')\n else: # AHRS mode\n f.write(' ')\n f.write('('+mode +' mode)\\n\\n')\n\n # Print Mounting Biases\n f.write('--------------- Angular Mounting Biases ----------------\\n')\n f.write('Device Euler Biases[ (deg) (deg) (deg) ]\\n')\n for n in range(0, numDev):\n devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])\n f.write('%2d SN%d [ %7.4f %7.4f %7.4f ]\\n' % (\n n, devInfo.v['serialNumber'][-1], mount_bias_euler[n, 0] * RAD2DEG, mount_bias_euler[n, 1] * RAD2DEG, mount_bias_euler[n, 2] * RAD2DEG))\n f.write('\\n')\n\n # Print Device Version Information\n f.write(\n '------------------------------------------- Device Info -------------------------------------------------\\n')\n for n in range(0, numDev):\n devInfo = itd.cDevInfo(log.devices[n].data['devInfo'])\n hver = devInfo.v['hardwareVer'][-1]\n cver = devInfo.v['commVer'][-1]\n fver = devInfo.v['firmwareVer'][-1]\n buld = devInfo.v['build'][-1]\n repo = devInfo.v['repoRevision'][-1]\n date = devInfo.v['buildDate'][-1]\n time = devInfo.v['buildTime'][-1]\n addi = devInfo.v['addInfo'][-1]\n f.write(\n '%2d SN%d HW: %d.%d.%d.%d FW: %d.%d.%d.%d build %d repo %d Proto: %d.%d.%d.%d Date: %04d-%02d-%02d %02d:%02d:%02d %s\\n' % (\n n, devInfo.v['serialNumber'][-1],\n hver[3], hver[2], hver[1], hver[0],\n fver[3], fver[2], fver[1], fver[0], buld, repo,\n cver[3], cver[2], cver[1], cver[0],\n 2000 + date[2], date[1], date[0],\n time[3], time[2], time[1],\n addi))\n f.write('\\n')\n\n f.close()\n\n # Automatically open report in Windows\n if 'win' in sys.platform:\n subprocess.Popen([\"notepad.exe\", filename]) # non-blocking call\n if 'linux' in sys.platform:\n subprocess.Popen(['gedit', filename])\n\n print(\"Done.\")\n\n # TODO: Pass out the union of the test errors\n return averageRMS\n\n"
] |
[
[
"numpy.square",
"numpy.hstack",
"numpy.min",
"numpy.arange",
"numpy.set_printoptions",
"numpy.max",
"scipy.interpolate.interp1d",
"numpy.mean",
"numpy.argmax",
"numpy.diff",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] |
bopopescu/drawquest-web
|
[
"8d8f9149b6efeb65202809a5f8916386f58a1b3b"
] |
[
"website/drawquest/management/commands/coin_balance_histogram.py"
] |
[
"import datetime\n\nfrom django.core.management.base import BaseCommand\nfrom matplotlib import pyplot\n\nfrom drawquest.apps.drawquest_auth.models import User\nfrom drawquest import knobs, economy\n\n\nclass Command(BaseCommand):\n args = ''\n help = ''\n\n def handle(self, *args, **options):\n balances = []\n\n for user in User.objects.filter(date_joined__gte=datetime.datetime.now() - datetime.timedelta(days=90)):\n if user.comments.count() < 2:\n continue\n balances.append(economy.balance(user))\n\n figures = {'count': 0}\n\n def hist(bins):\n figures['count'] += 1\n pyplot.figure(figures['count'])\n pyplot.hist(balances, bins=bins, facecolor='green', alpha=0.75)\n pyplot.xlabel('Coins')\n pyplot.ylabel('User count')\n pyplot.suptitle(r'Coin balances')\n pyplot.grid(True)\n pyplot.savefig('/home/ubuntu/graphs/{}.svg'.format(figures['count']), dpi=180)\n\n hist(range(0, 101, 1))\n hist(range(0, 301, 5))\n hist(range(0, 1001, 10))\n hist(range(1001, 10000, 100))\n\n"
] |
[
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] |
cclauss/DBNet
|
[
"53dd1b2d3d609257920bb3553ca3d012abe29f0e"
] |
[
"models/resnet152_pm.py"
] |
[
"import os\r\nimport sys\r\n\r\nimport tensorflow as tf\r\nimport scipy\r\nimport numpy as np\r\n\r\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\r\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\r\n\r\nimport tf_util\r\nfrom custom_layers import Scale\r\nfrom keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, add, Reshape, Activation\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.models import Model\r\n\r\ndef placeholder_inputs(batch_size, img_rows=66, img_cols=200, separately=False):\r\n imgs_pl = tf.placeholder(tf.float32,\r\n shape=(batch_size, img_rows, img_cols, 3))\r\n fmaps_pl = tf.placeholder(tf.float32,\r\n shape=(batch_size, img_rows, img_cols, 3))\r\n if separately:\r\n speeds_pl = tf.placeholder(tf.float32, shape=(batch_size))\r\n angles_pl = tf.placeholder(tf.float32, shape=(batch_size))\r\n labels_pl = [speeds_pl, angles_pl]\r\n labels_pl = tf.placeholder(tf.float32, shape=(batch_size, 2))\r\n return imgs_pl, fmaps_pl, labels_pl\r\n\r\n\r\ndef get_resnet(img_rows=224, img_cols=224, separately=False):\r\n \"\"\"\r\n Resnet 152 Model for Keras\r\n\r\n Model Schema and layer naming follow that of the original Caffe implementation\r\n https://github.com/KaimingHe/deep-residual-networks\r\n\r\n ImageNet Pretrained Weights\r\n Theano: https://drive.google.com/file/d/0Byy2AcGyEVxfZHhUT3lWVWxRN28/view?usp=sharing\r\n TensorFlow: https://drive.google.com/file/d/0Byy2AcGyEVxfeXExMzNNOHpEODg/view?usp=sharing\r\n\r\n Parameters:\r\n img_rows, img_cols - resolution of inputs\r\n channel - 1 for grayscale, 3 for color\r\n \"\"\"\r\n\r\n img_input = Input(shape=(img_rows, img_cols, 3), name='data')\r\n\r\n eps = 1.1e-5\r\n x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)\r\n x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)\r\n x = BatchNormalization(epsilon=eps, axis=3, name='bn_conv1')(x)\r\n x = Scale(axis=3, name='scale_conv1')(x)\r\n x = Activation('relu', name='conv1_relu')(x)\r\n x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)\r\n\r\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\r\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\r\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\r\n\r\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\r\n for i in range(1,8):\r\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b'+str(i))\r\n\r\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\r\n for i in range(1,36):\r\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b'+str(i))\r\n\r\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\r\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\r\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\r\n\r\n x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)\r\n x_fc = Flatten()(x_fc)\r\n x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)\r\n\r\n model = Model(img_input, x_fc)\r\n\r\n # Use pre-trained weights for Tensorflow backend\r\n weights_path = 'utils/weights/resnet152_weights_tf.h5'\r\n assert (os.path.exists(weights_path))\r\n\r\n model.load_weights(weights_path, by_name=True)\r\n\r\n # Truncate and replace softmax layer for transfer learning\r\n # Cannot use model.layers.pop() since model is not of Sequential() type\r\n # The method below works since pre-trained weights are stored in layers but not in the model\r\n x_newfc = AveragePooling2D((7, 7), 
name='avg_pool')(x)\r\n x_newfc = Flatten()(x_newfc)\r\n x_newfc = Dense(256, name='fc8')(x_newfc)\r\n\r\n model = Model(img_input, x_newfc)\r\n return model\r\n\r\n\r\ndef get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):\r\n \"\"\" ResNet152 regression model, input is BxWxHx3, output Bx2\"\"\"\r\n batch_size = net[0].get_shape()[0].value\r\n img_net, fmap_net = net[0], net[1]\r\n\r\n img_net = get_resnet(224, 224)(img_net)\r\n fmap_net = get_resnet(224, 224)(fmap_net)\r\n\r\n net = tf.reshape(tf.stack([img_net, fmap_net]), [batch_size, -1])\r\n\r\n if not add_lstm:\r\n for i, dim in enumerate([256, 128, 16]):\r\n fc_scope = \"fc\" + str(i + 1)\r\n dp_scope = \"dp\" + str(i + 1)\r\n net = tf_util.fully_connected(net, dim, bn=True,\r\n is_training=is_training,\r\n scope=fc_scope,\r\n bn_decay=bn_decay)\r\n net = tf_util.dropout(net, keep_prob=0.7,\r\n is_training=is_training,\r\n scope=dp_scope)\r\n net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc4')\r\n else:\r\n fc_scope = \"fc1\"\r\n net = tf_util.fully_connected(net, 784, bn=True,\r\n is_training=is_training,\r\n scope=fc_scope,\r\n bn_decay=bn_decay)\r\n net = tf_util.dropout(net, keep_prob=0.7,\r\n is_training=is_training,\r\n scope=\"dp1\")\r\n net = cnn_lstm_block(net)\r\n return net\r\n\r\n\r\ndef cnn_lstm_block(input_tensor):\r\n lstm_in = tf.reshape(input_tensor, [-1, 28, 28])\r\n lstm_out = tf_util.stacked_lstm(lstm_in,\r\n num_outputs=10,\r\n time_steps=28,\r\n scope=\"cnn_lstm\")\r\n\r\n W_final = tf.Variable(tf.truncated_normal([10, 2], stddev=0.1))\r\n b_final = tf.Variable(tf.truncated_normal([2], stddev=0.1))\r\n return tf.multiply(tf.atan(tf.matmul(lstm_out, W_final) + b_final), 2)\r\n\r\n\r\ndef identity_block(input_tensor, kernel_size, filters, stage, block):\r\n '''The identity_block is the block that has no conv layer at shortcut\r\n # Arguments\r\n input_tensor: input tensor\r\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\r\n filters: list of integers, the nb_filters of 3 conv layer at main path\r\n stage: integer, current stage label, used for generating layer names\r\n block: 'a','b'..., current block label, used for generating layer names\r\n '''\r\n eps = 1.1e-5\r\n nb_filter1, nb_filter2, nb_filter3 = filters\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n scale_name_base = 'scale' + str(stage) + block + '_branch'\r\n\r\n x = Convolution2D(nb_filter1, (1, 1), name=conv_name_base + '2a', use_bias=False)(input_tensor)\r\n x = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '2a')(x)\r\n x = Scale(axis=3, name=scale_name_base + '2a')(x)\r\n x = Activation('relu', name=conv_name_base + '2a_relu')(x)\r\n\r\n x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)\r\n x = Convolution2D(nb_filter2, (kernel_size, kernel_size),\r\n name=conv_name_base + '2b', use_bias=False)(x)\r\n x = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '2b')(x)\r\n x = Scale(axis=3, name=scale_name_base + '2b')(x)\r\n x = Activation('relu', name=conv_name_base + '2b_relu')(x)\r\n\r\n x = Convolution2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)\r\n x = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '2c')(x)\r\n x = Scale(axis=3, name=scale_name_base + '2c')(x)\r\n\r\n x = add([x, input_tensor], name='res' + str(stage) + block)\r\n x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)\r\n return 
x\r\n\r\n\r\ndef conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):\r\n '''conv_block is the block that has a conv layer at shortcut\r\n # Arguments\r\n input_tensor: input tensor\r\n kernel_size: defualt 3, the kernel size of middle conv layer at main path\r\n filters: list of integers, the nb_filters of 3 conv layer at main path\r\n stage: integer, current stage label, used for generating layer names\r\n block: 'a','b'..., current block label, used for generating layer names\r\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\r\n And the shortcut should have subsample=(2,2) as well\r\n '''\r\n eps = 1.1e-5\r\n nb_filter1, nb_filter2, nb_filter3 = filters\r\n conv_name_base = 'res' + str(stage) + block + '_branch'\r\n bn_name_base = 'bn' + str(stage) + block + '_branch'\r\n scale_name_base = 'scale' + str(stage) + block + '_branch'\r\n\r\n x = Convolution2D(nb_filter1, (1, 1), strides=strides,\r\n name=conv_name_base + '2a', use_bias=False)(input_tensor)\r\n x = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '2a')(x)\r\n x = Scale(axis=3, name=scale_name_base + '2a')(x)\r\n x = Activation('relu', name=conv_name_base + '2a_relu')(x)\r\n\r\n x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)\r\n x = Convolution2D(nb_filter2, (kernel_size, kernel_size),\r\n name=conv_name_base + '2b', use_bias=False)(x)\r\n x = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '2b')(x)\r\n x = Scale(axis=3, name=scale_name_base + '2b')(x)\r\n x = Activation('relu', name=conv_name_base + '2b_relu')(x)\r\n\r\n x = Convolution2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)\r\n x = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '2c')(x)\r\n x = Scale(axis=3, name=scale_name_base + '2c')(x)\r\n\r\n shortcut = Convolution2D(nb_filter3, (1, 1), strides=strides,\r\n name=conv_name_base + '1', use_bias=False)(input_tensor)\r\n shortcut = BatchNormalization(epsilon=eps, axis=3, name=bn_name_base + '1')(shortcut)\r\n shortcut = Scale(axis=3, name=scale_name_base + '1')(shortcut)\r\n\r\n x = add([x, shortcut], name='res' + str(stage) + block)\r\n x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)\r\n return x\r\n\r\n\r\ndef get_loss(pred, label, l2_weight=0.0001):\r\n diff = tf.square(tf.subtract(pred, label))\r\n train_vars = tf.trainable_variables()\r\n l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in train_vars[1:]]) * l2_weight\r\n loss = tf.reduce_mean(diff + l2_loss)\r\n tf.summary.scalar('l2 loss', l2_loss * l2_weight)\r\n tf.summary.scalar('loss', loss)\r\n\r\n return loss\r\n\r\n\r\ndef summary_scalar(pred, label):\r\n threholds = [5, 4, 3, 2, 1, 0.5]\r\n angles = [float(t) / 180 * scipy.pi for t in threholds]\r\n speeds = [float(t) / 20 for t in threholds]\r\n\r\n for i in range(len(threholds)):\r\n scalar_angle = \"angle(\" + str(angles[i]) + \")\"\r\n scalar_speed = \"speed(\" + str(speeds[i]) + \")\"\r\n ac_angle = tf.abs(tf.subtract(pred[:, 1], label[:, 1])) < threholds[i]\r\n ac_speed = tf.abs(tf.subtract(pred[:, 0], label[:, 0])) < threholds[i]\r\n ac_angle = tf.reduce_mean(tf.cast(ac_angle, tf.float32))\r\n ac_speed = tf.reduce_mean(tf.cast(ac_speed, tf.float32))\r\n\r\n tf.summary.scalar(scalar_angle, ac_angle)\r\n tf.summary.scalar(scalar_speed, ac_speed)\r\n\r\n\r\ndef resize(imgs):\r\n batch_size = imgs.shape[0]\r\n imgs_new = []\r\n for j in range(batch_size):\r\n img = imgs[j,:,:,:]\r\n new = scipy.misc.imresize(img, (224, 224))\r\n 
imgs_new.append(new)\r\n imgs_new = np.stack(imgs_new, axis=0)\r\n return imgs_new\r\n\r\n\r\nif __name__ == '__main__':\r\n with tf.Graph().as_default():\r\n imgs = tf.zeros((32, 224, 224, 3))\r\n fmaps = tf.zeros((32, 224, 224, 3))\r\n outputs = get_model([imgs, fmaps], tf.constant(True))\r\n print(outputs)\r\n"
] |
[
[
"scipy.misc.imresize",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.constant",
"tensorflow.Graph",
"tensorflow.reduce_mean",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"numpy.stack",
"tensorflow.subtract",
"tensorflow.nn.l2_loss",
"tensorflow.trainable_variables",
"tensorflow.summary.scalar"
]
] |
CGuichardMasterDL/MCS_DTW
|
[
"e3bc1fac3599774e1a5182bbd9e55bebdac44a94"
] |
[
"mcs_dtw/kppv.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n Algo de classification par k-plus proches voisins en utilisant la librairie sklearn\n\"\"\"\n\n#========== IMPORT ==========#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n\n#========== CLASSE ==========#\n\nk_ordre = 3\nk_locuteur = 9\n\n#======== FUNCTIONS =========#\n\n\ndef find_kppv_match(unknown_sound, base, params):\n \"\"\"\n Trouve le son le plus proche de unknown_sound dans le tableau de sons passé en paramètre\n \"\"\"\n (kppv, scaler, base_vectorielle) = params\n update_composantes_principales(unknown_sound, base_vectorielle, scaler)\n return base[kppv.predict([unknown_sound.get_composantes_principales()])[0]]\n\n\ndef pretraitement_acp(base):\n \"\"\"\n Construire la base de l'espace vectoriel correspondant aux vecteurs propres\n associés aux trois plus grandes valeurs propres de la matrice de covariance\n de la base d'apprentissage\n\n NB:\n\n PCA(n_components=3) indique qu'on souhaite réduire les données en 3 dimmensions\n\n acp.components_ contient alors la matrice 3*12 correspondant aux n_components\n vecteurs propres\n\n ---> (12,1)*(3,12) ---> (1,3) un point de l'espace\n \"\"\"\n learning_base = []\n for sound in base:\n learning_base.append(mean_mfcc(sound))\n\n scaler = StandardScaler()\n learning_base = scaler.fit_transform(learning_base)\n acp = PCA(n_components=3)\n acp.fit_transform(learning_base)\n\n for sound in base:\n update_composantes_principales(\n sound, np.transpose(acp.components_), scaler)\n\n kppv = KNeighborsClassifier(n_neighbors=1)\n data = [sound.get_composantes_principales() for sound in base]\n classes = range(len(base))\n kppv.fit(data, classes)\n\n return (kppv, scaler, np.transpose(acp.components_))\n\n\ndef pretraitement_acp_dual(base):\n \"\"\"\n Préparer l'analyse kppv sur les ordres et sur les locuteurs\n \"\"\"\n learning_base = []\n for sound in base:\n learning_base.append(mean_mfcc(sound))\n\n scaler = StandardScaler()\n learning_base = scaler.fit_transform(learning_base)\n acp = PCA(n_components=3)\n acp.fit_transform(learning_base)\n\n for sound in base:\n update_composantes_principales(\n sound, np.transpose(acp.components_), scaler)\n\n\n kppv_ordre = KNeighborsClassifier(n_neighbors=k_ordre)\n kppv_locuteur = KNeighborsClassifier(n_neighbors=k_locuteur)\n\n data = [sound.get_composantes_principales() for sound in base]\n classes_ordre = [sound.get_ordre() for sound in base]\n classes_locuteur = [sound.get_locuteur() for sound in base]\n\n kppv_ordre.fit(data, classes_ordre)\n kppv_locuteur.fit(data, classes_locuteur)\n return (kppv_ordre, kppv_locuteur, scaler, np.transpose(acp.components_))\n\n\ndef find_dual_kppv_match(unknown_sound, base, params):\n \"\"\"\n Trouve le son le plus proche de unknown_sound dans le tableau de sons passé en paramètre\n \"\"\"\n (kppv_ordre, kppv_locuteur, scaler, base_vectorielle) = params\n update_composantes_principales(unknown_sound, base_vectorielle, scaler)\n\n ordre_predis = kppv_ordre.predict(\n [unknown_sound.get_composantes_principales()])[0]\n\n locuteur_predis = kppv_locuteur.predict(\n [unknown_sound.get_composantes_principales()])[0]\n\n return [x for x in base if x.get_ordre() == ordre_predis\n and x.get_locuteur() == locuteur_predis][0]\n\n\ndef mean_mfcc(sound):\n 
\"\"\"\n Retourne le vecteur de taille 12 correspondant à la moyenne des n fenêtres\n d'une mfcc\n \"\"\"\n meaned_mfcc = []\n for fenetre in np.transpose(sound.get_mfcc()):\n meaned_mfcc.append(np.mean(fenetre))\n return meaned_mfcc\n\n\ndef update_composantes_principales(sound, base_vectorielle, scaler):\n \"\"\"\n Mise à jour des 3 coordonnées représentatives d'un son\n d'après la base vectorielle passée en paramètre\n \"\"\"\n meaned_mfcc = scaler.transform([mean_mfcc(sound)])\n sound.set_composantes_principales(\n np.dot(meaned_mfcc, base_vectorielle)[0])\n\n\ndef etude_valeurs_k_ordre_locuteur(learning_framework, base_test):\n \"\"\"\n Comparer l'algorithme kppv dual pour toutes les valeurs\n de 0 < k_ordre < 18 (car 18 locuteurs max par ordre dans le voisinage)\n et 0 < k_locuteur < 13 (car 13 ordres max par locuteur dans le voisinage)\n \"\"\"\n data = []\n global k_ordre, k_locuteur\n original_ko = k_ordre\n original_kl = k_locuteur\n range_ordre = range(1, 19)\n range_locuteur = range(1, 14)\n for k_ordre in range_ordre:\n data_line = []\n for k_locuteur in range_locuteur:\n result = learning_framework.analyse(\n base_test, find_dual_kppv_match, pretraitement_acp_dual).get_stats()\n data_line.append(100*result[1])\n data.append(data_line)\n k_ordre = original_ko\n k_locuteur = original_kl\n return (data, range_ordre, range_locuteur)\n\n\ndef show_etude_valeurs_k(results):\n \"\"\"\n matplotlib hell\n \"\"\"\n fig = plt.figure()\n axes = fig.gca(projection='3d')\n\n z_ax = np.transpose(np.asarray(results[0]))\n x_ax = np.asarray(results[1])\n y_ax = np.asarray(results[2])\n\n x_ax, y_ax = np.meshgrid(x_ax, y_ax)\n surf = axes.plot_surface(x_ax, y_ax, z_ax, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\n axes.zaxis.set_major_locator(LinearLocator(10))\n axes.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n axes.set_xlabel(\"k_ordre\")\n axes.set_ylabel(\"k_locuteur\")\n axes.set_zlabel(\"Taux de reconnaissance ordre et locuteur %\")\n\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n plt.show()\n\n\ndef affichages_effets_audios(sounds):\n \"\"\"\n Afficher en 3d les points correspondants aux fichiers audios en paramètres\n En mettant en évidence les différences entre les effets audios\n \"\"\"\n plt.figure()\n axes = plt.axes(projection='3d')\n\n sounds_dict = {}\n for sound in sounds:\n if sound.get_locuteur()+\" : \"+sound.get_ordre() not in sounds_dict:\n sounds_dict[sound.get_locuteur()+\" : \"+sound.get_ordre()] = (\n [sound],\n [sound.get_composantes_principales()[0]],\n [sound.get_composantes_principales()[1]],\n [sound.get_composantes_principales()[2]],\n )\n else:\n sounds_dict[sound.get_locuteur()+\" : \"+sound.get_ordre()\n ][0].append(sound)\n sounds_dict[sound.get_locuteur()+\" : \"+sound.get_ordre()\n ][1].append(sound.get_composantes_principales()[0])\n sounds_dict[sound.get_locuteur()+\" : \"+sound.get_ordre()\n ][2].append(sound.get_composantes_principales()[1])\n sounds_dict[sound.get_locuteur()+\" : \"+sound.get_ordre()\n ][3].append(sound.get_composantes_principales()[2])\n\n for key, value in sounds_dict.items():\n axes.plot(value[1], value[2], value[3])\n axes.text(value[1][0], value[2][0], value[3][0], '%s' % (key+\"\\n\"+sound.get_effet()), # pylint: disable=W0631\n size=9, zorder=1, color='k')\n for i, sound in enumerate(value[0]):\n if i != 0:\n (x_ax, y_ax, z_ax) = sound.get_composantes_principales()\n axes.text(x_ax, y_ax, z_ax, '%s' % (sound.get_effet()),\n size=8, zorder=1, color='k')\n\n plt.show()\n"
] |
[
[
"numpy.dot",
"numpy.asarray",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.axes",
"matplotlib.ticker.LinearLocator",
"numpy.mean",
"numpy.transpose",
"matplotlib.ticker.FormatStrFormatter",
"sklearn.preprocessing.StandardScaler",
"numpy.meshgrid",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
mljack/OBBDet_Swin
|
[
"9491f1277babb31e900dc5f5e6a117bf46b62961"
] |
[
"mmdet/ops/dcn/deform_conv.py"
] |
[
"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import CONV_LAYERS\nfrom mmcv.utils import print_log\nfrom torch.autograd import Function\nfrom torch.autograd.function import once_differentiable\nfrom torch.nn.modules.utils import _pair, _single\n\nfrom . import deform_conv_ext\n\n\nclass DeformConvFunction(Function):\n\n @staticmethod\n def forward(ctx,\n input,\n offset,\n weight,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n deformable_groups=1,\n im2col_step=64):\n if input is not None and input.dim() != 4:\n raise ValueError(f'Expected 4D tensor as input, got {input.dim()}'\n 'D tensor instead.')\n ctx.stride = _pair(stride)\n ctx.padding = _pair(padding)\n ctx.dilation = _pair(dilation)\n ctx.groups = groups\n ctx.deformable_groups = deformable_groups\n ctx.im2col_step = im2col_step\n\n ctx.save_for_backward(input, offset, weight)\n\n output = input.new_empty(\n DeformConvFunction._output_size(input, weight, ctx.padding,\n ctx.dilation, ctx.stride))\n\n ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones\n\n if not input.is_cuda:\n raise NotImplementedError\n else:\n cur_im2col_step = min(ctx.im2col_step, input.shape[0])\n assert (input.shape[0] %\n cur_im2col_step) == 0, 'im2col step must divide batchsize'\n deform_conv_ext.deform_conv_forward(\n input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1],\n weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0],\n ctx.padding[1], ctx.padding[0], ctx.dilation[1],\n ctx.dilation[0], ctx.groups, ctx.deformable_groups,\n cur_im2col_step)\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n input, offset, weight = ctx.saved_tensors\n\n grad_input = grad_offset = grad_weight = None\n\n if not grad_output.is_cuda:\n raise NotImplementedError\n else:\n cur_im2col_step = min(ctx.im2col_step, input.shape[0])\n assert (input.shape[0] %\n cur_im2col_step) == 0, 'im2col step must divide batchsize'\n\n if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:\n grad_input = torch.zeros_like(input)\n grad_offset = torch.zeros_like(offset)\n deform_conv_ext.deform_conv_backward_input(\n input, offset, grad_output, grad_input,\n grad_offset, weight, ctx.bufs_[0], weight.size(3),\n weight.size(2), ctx.stride[1], ctx.stride[0],\n ctx.padding[1], ctx.padding[0], ctx.dilation[1],\n ctx.dilation[0], ctx.groups, ctx.deformable_groups,\n cur_im2col_step)\n\n if ctx.needs_input_grad[2]:\n grad_weight = torch.zeros_like(weight)\n deform_conv_ext.deform_conv_backward_parameters(\n input, offset, grad_output,\n grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3),\n weight.size(2), ctx.stride[1], ctx.stride[0],\n ctx.padding[1], ctx.padding[0], ctx.dilation[1],\n ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1,\n cur_im2col_step)\n\n return (grad_input, grad_offset, grad_weight, None, None, None, None,\n None)\n\n @staticmethod\n def _output_size(input, weight, padding, dilation, stride):\n channels = weight.size(0)\n output_size = (input.size(0), channels)\n for d in range(input.dim() - 2):\n in_size = input.size(d + 2)\n pad = padding[d]\n kernel = dilation[d] * (weight.size(d + 2) - 1) + 1\n stride_ = stride[d]\n output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )\n if not all(map(lambda s: s > 0, output_size)):\n raise ValueError('convolution input is too small (output would be '\n f'{\"x\".join(map(str, output_size))})')\n return output_size\n\n\nclass ModulatedDeformConvFunction(Function):\n\n @staticmethod\n def 
forward(ctx,\n input,\n offset,\n mask,\n weight,\n bias=None,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n deformable_groups=1):\n ctx.stride = stride\n ctx.padding = padding\n ctx.dilation = dilation\n ctx.groups = groups\n ctx.deformable_groups = deformable_groups\n ctx.with_bias = bias is not None\n if not ctx.with_bias:\n bias = input.new_empty(1) # fake tensor\n if not input.is_cuda:\n raise NotImplementedError\n if weight.requires_grad or mask.requires_grad or offset.requires_grad \\\n or input.requires_grad:\n ctx.save_for_backward(input, offset, mask, weight, bias)\n output = input.new_empty(\n ModulatedDeformConvFunction._infer_shape(ctx, input, weight))\n ctx._bufs = [input.new_empty(0), input.new_empty(0)]\n deform_conv_ext.modulated_deform_conv_forward(\n input, weight, bias, ctx._bufs[0], offset, mask, output,\n ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride,\n ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,\n ctx.groups, ctx.deformable_groups, ctx.with_bias)\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n if not grad_output.is_cuda:\n raise NotImplementedError\n input, offset, mask, weight, bias = ctx.saved_tensors\n grad_input = torch.zeros_like(input)\n grad_offset = torch.zeros_like(offset)\n grad_mask = torch.zeros_like(mask)\n grad_weight = torch.zeros_like(weight)\n grad_bias = torch.zeros_like(bias)\n deform_conv_ext.modulated_deform_conv_backward(\n input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1],\n grad_input, grad_weight, grad_bias, grad_offset, grad_mask,\n grad_output, weight.shape[2], weight.shape[3], ctx.stride,\n ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,\n ctx.groups, ctx.deformable_groups, ctx.with_bias)\n if not ctx.with_bias:\n grad_bias = None\n\n return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\n None, None, None, None, None)\n\n @staticmethod\n def _infer_shape(ctx, input, weight):\n n = input.size(0)\n channels_out = weight.size(0)\n height, width = input.shape[2:4]\n kernel_h, kernel_w = weight.shape[2:4]\n # TODO: support different padding/stride/dilation in height and width\n height_out = (height + 2 * ctx.padding -\n (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1\n width_out = (width + 2 * ctx.padding -\n (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1\n return n, channels_out, height_out, width_out\n\n\ndeform_conv = DeformConvFunction.apply\nmodulated_deform_conv = ModulatedDeformConvFunction.apply\n\n\nclass DeformConv(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n deformable_groups=1,\n bias=False):\n super(DeformConv, self).__init__()\n\n assert not bias\n assert in_channels % groups == 0, \\\n f'in_channels {in_channels} is not divisible by groups {groups}'\n assert out_channels % groups == 0, \\\n f'out_channels {out_channels} is not divisible ' \\\n f'by groups {groups}'\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = _pair(kernel_size)\n self.stride = _pair(stride)\n self.padding = _pair(padding)\n self.dilation = _pair(dilation)\n self.groups = groups\n self.deformable_groups = deformable_groups\n # enable compatibility with nn.Conv2d\n self.transposed = False\n self.output_padding = _single(0)\n\n self.weight = nn.Parameter(\n torch.Tensor(out_channels, in_channels // self.groups,\n *self.kernel_size))\n\n self.reset_parameters()\n\n def 
reset_parameters(self):\n n = self.in_channels\n for k in self.kernel_size:\n n *= k\n stdv = 1. / math.sqrt(n)\n self.weight.data.uniform_(-stdv, stdv)\n\n def forward(self, x, offset):\n # To fix an assert error in deform_conv_cuda.cpp:128\n # input image is smaller than kernel\n input_pad = (\n x.size(2) < self.kernel_size[0] or x.size(3) < self.kernel_size[1])\n if input_pad:\n pad_h = max(self.kernel_size[0] - x.size(2), 0)\n pad_w = max(self.kernel_size[1] - x.size(3), 0)\n x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous()\n offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant',\n 0).contiguous()\n out = deform_conv(x, offset, self.weight, self.stride, self.padding,\n self.dilation, self.groups, self.deformable_groups)\n if input_pad:\n out = out[:, :, :out.size(2) - pad_h, :out.size(3) -\n pad_w].contiguous()\n return out\n\n\n# @CONV_LAYERS.register_module(name='DCN')\nclass DeformConvPack(DeformConv):\n \"\"\"A Deformable Conv Encapsulation that acts as normal Conv layers.\n\n The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.\n The spatial arrangement is like:\n ```\n (x0, y0) (x1, y1) (x2, y2)\n (x3, y3) (x4, y4) (x5, y5)\n (x6, y6) (x7, y7) (x8, y8)\n ```\n\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int or tuple[int]): Same as nn.Conv2d.\n stride (int or tuple[int]): Same as nn.Conv2d.\n padding (int or tuple[int]): Same as nn.Conv2d.\n dilation (int or tuple[int]): Same as nn.Conv2d.\n groups (int): Same as nn.Conv2d.\n bias (bool or str): If specified as `auto`, it will be decided by the\n norm_cfg. Bias will be set as True if norm_cfg is None, otherwise\n False.\n \"\"\"\n\n _version = 2\n\n def __init__(self, *args, **kwargs):\n super(DeformConvPack, self).__init__(*args, **kwargs)\n\n self.conv_offset = nn.Conv2d(\n self.in_channels,\n self.deformable_groups * 2 * self.kernel_size[0] *\n self.kernel_size[1],\n kernel_size=self.kernel_size,\n stride=_pair(self.stride),\n padding=_pair(self.padding),\n dilation=_pair(self.dilation),\n bias=True)\n self.init_offset()\n\n def init_offset(self):\n self.conv_offset.weight.data.zero_()\n self.conv_offset.bias.data.zero_()\n\n def forward(self, x):\n offset = self.conv_offset(x)\n return deform_conv(x, offset, self.weight, self.stride, self.padding,\n self.dilation, self.groups, self.deformable_groups)\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n version = local_metadata.get('version', None)\n\n if version is None or version < 2:\n # the key is different in early versions\n # In version < 2, DeformConvPack loads previous benchmark models.\n if (prefix + 'conv_offset.weight' not in state_dict\n and prefix[:-1] + '_offset.weight' in state_dict):\n state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(\n prefix[:-1] + '_offset.weight')\n if (prefix + 'conv_offset.bias' not in state_dict\n and prefix[:-1] + '_offset.bias' in state_dict):\n state_dict[prefix +\n 'conv_offset.bias'] = state_dict.pop(prefix[:-1] +\n '_offset.bias')\n\n if version is not None and version > 1:\n print_log(\n f'DeformConvPack {prefix.rstrip(\".\")} is upgraded to '\n 'version 2.',\n logger='root')\n\n super()._load_from_state_dict(state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs)\n\n\nclass ModulatedDeformConv(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n 
groups=1,\n deformable_groups=1,\n bias=True):\n super(ModulatedDeformConv, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = _pair(kernel_size)\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = groups\n self.deformable_groups = deformable_groups\n self.with_bias = bias\n # enable compatibility with nn.Conv2d\n self.transposed = False\n self.output_padding = _single(0)\n\n self.weight = nn.Parameter(\n torch.Tensor(out_channels, in_channels // groups,\n *self.kernel_size))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n self.init_weights()\n\n def init_weights(self):\n n = self.in_channels\n for k in self.kernel_size:\n n *= k\n stdv = 1. / math.sqrt(n)\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.zero_()\n\n def forward(self, x, offset, mask):\n return modulated_deform_conv(x, offset, mask, self.weight, self.bias,\n self.stride, self.padding, self.dilation,\n self.groups, self.deformable_groups)\n\n\n# @CONV_LAYERS.register_module(name='DCNv2')\nclass ModulatedDeformConvPack(ModulatedDeformConv):\n \"\"\"A ModulatedDeformable Conv Encapsulation that acts as normal Conv layers.\n\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int or tuple[int]): Same as nn.Conv2d.\n stride (int): Same as nn.Conv2d, while tuple is not supported.\n padding (int): Same as nn.Conv2d, while tuple is not supported.\n dilation (int): Same as nn.Conv2d, while tuple is not supported.\n groups (int): Same as nn.Conv2d.\n bias (bool or str): If specified as `auto`, it will be decided by the\n norm_cfg. Bias will be set as True if norm_cfg is None, otherwise\n False.\n \"\"\"\n\n _version = 2\n\n def __init__(self, *args, **kwargs):\n super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)\n\n self.conv_offset = nn.Conv2d(\n self.in_channels,\n self.deformable_groups * 3 * self.kernel_size[0] *\n self.kernel_size[1],\n kernel_size=self.kernel_size,\n stride=_pair(self.stride),\n padding=_pair(self.padding),\n dilation=_pair(self.dilation),\n bias=True)\n self.init_weights()\n\n def init_weights(self):\n super(ModulatedDeformConvPack, self).init_weights()\n if hasattr(self, 'conv_offset'):\n self.conv_offset.weight.data.zero_()\n self.conv_offset.bias.data.zero_()\n\n def forward(self, x):\n out = self.conv_offset(x)\n o1, o2, mask = torch.chunk(out, 3, dim=1)\n offset = torch.cat((o1, o2), dim=1)\n mask = torch.sigmoid(mask)\n return modulated_deform_conv(x, offset, mask, self.weight, self.bias,\n self.stride, self.padding, self.dilation,\n self.groups, self.deformable_groups)\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n version = local_metadata.get('version', None)\n\n if version is None or version < 2:\n # the key is different in early versions\n # In version < 2, ModulatedDeformConvPack\n # loads previous benchmark models.\n if (prefix + 'conv_offset.weight' not in state_dict\n and prefix[:-1] + '_offset.weight' in state_dict):\n state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(\n prefix[:-1] + '_offset.weight')\n if (prefix + 'conv_offset.bias' not in state_dict\n and prefix[:-1] + '_offset.bias' in state_dict):\n state_dict[prefix +\n 'conv_offset.bias'] = state_dict.pop(prefix[:-1] +\n '_offset.bias')\n\n if version is not None and version > 1:\n 
print_log(\n f'ModulatedDeformConvPack {prefix.rstrip(\".\")} is upgraded to '\n 'version 2.',\n logger='root')\n\n super()._load_from_state_dict(state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs)\n"
] |
[
[
"torch.sigmoid",
"torch.nn.modules.utils._single",
"torch.Tensor",
"torch.cat",
"torch.zeros_like",
"torch.nn.modules.utils._pair",
"torch.chunk",
"torch.nn.functional.pad"
]
] |
yohney/pytorch-ssd
|
[
"205767324f64f1638debcb7247cc81f04b826b37"
] |
[
"vision/ssd/mobilenet_v2_ssd_lite.py"
] |
[
"import torch\nfrom torch.nn import Conv2d, Sequential, ModuleList, BatchNorm2d\nfrom torch import nn\nfrom ..nn.mobilenet_v2 import MobileNetV2, InvertedResidual\n\nfrom .ssd import SSD, GraphPath\nfrom .predictor import Predictor\nfrom .config import mobilenetv1_ssd_config as config\n\n\ndef SeperableConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, onnx_compatible=False):\n \"\"\"Replace Conv2d with a depthwise Conv2d and Pointwise Conv2d.\n \"\"\"\n ReLU = nn.ReLU if onnx_compatible else nn.ReLU6\n return Sequential(\n Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size,\n groups=in_channels, stride=stride, padding=padding),\n BatchNorm2d(in_channels),\n ReLU(),\n Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1),\n )\n\n\ndef create_mobilenetv2_ssd_lite(num_classes, width_mult=1.0, use_batch_norm=True, onnx_compatible=False, is_test=False, convert_to_boxes=True):\n base_net = MobileNetV2(width_mult=width_mult, use_batch_norm=use_batch_norm,\n onnx_compatible=onnx_compatible).features\n\n source_layer_indexes = [\n GraphPath(14, 'conv', 3),\n 19,\n ]\n extras = ModuleList([\n InvertedResidual(1280, 512, stride=2, expand_ratio=0.2),\n InvertedResidual(512, 256, stride=2, expand_ratio=0.25),\n InvertedResidual(256, 256, stride=2, expand_ratio=0.5),\n InvertedResidual(256, 64, stride=2, expand_ratio=0.25)\n ])\n\n regression_headers = ModuleList([\n SeperableConv2d(in_channels=round(576 * width_mult), out_channels=6 * 4,\n kernel_size=3, padding=1, onnx_compatible=False),\n SeperableConv2d(in_channels=1280, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False),\n SeperableConv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False),\n SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False),\n SeperableConv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1, onnx_compatible=False),\n Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),\n ])\n\n classification_headers = ModuleList([\n SeperableConv2d(in_channels=round(576 * width_mult), out_channels=6 * num_classes, kernel_size=3, padding=1),\n SeperableConv2d(in_channels=1280, out_channels=6 * num_classes, kernel_size=3, padding=1),\n SeperableConv2d(in_channels=512, out_channels=6 * num_classes, kernel_size=3, padding=1),\n SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1),\n SeperableConv2d(in_channels=256, out_channels=6 * num_classes, kernel_size=3, padding=1),\n Conv2d(in_channels=64, out_channels=6 * num_classes, kernel_size=1),\n ])\n\n return SSD(num_classes, base_net, source_layer_indexes,\n extras, classification_headers, regression_headers, is_test=is_test, config=config, convert_to_boxes=convert_to_boxes)\n\n\ndef create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, device=torch.device('cpu')):\n predictor = Predictor(net, config.image_size, config.image_mean,\n config.image_std,\n nms_method=nms_method,\n iou_threshold=config.iou_threshold,\n candidate_size=candidate_size,\n sigma=sigma,\n device=device)\n return predictor\n"
] |
[
[
"torch.device",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
]
] |
SURAJtheMAKER/Tflite
|
[
"c7e966b5e35ee7fc511c1efe84dba8d3558f2b1c"
] |
[
"tensorflow/python/ops/array_ops.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Tests for this file live in python/kernel_tests/array_ops_test.py\n\"\"\"Support for manipulating tensors.\n\nSee the @{$python/array_ops} guide.\n\n@@string_to_number\n@@to_double\n@@to_float\n@@to_bfloat16\n@@to_int32\n@@to_int64\n@@cast\n@@bitcast\n@@saturate_cast\n@@broadcast_dynamic_shape\n@@broadcast_static_shape\n@@shape\n@@shape_n\n@@size\n@@rank\n@@reshape\n@@squeeze\n@@expand_dims\n@@unravel_index\n@@meshgrid\n@@slice\n@@strided_slice\n@@split\n@@tile\n@@pad\n@@concat\n@@stack\n@@parallel_stack\n@@unstack\n@@reverse_sequence\n@@reverse\n@@reverse_v2\n@@transpose\n@@extract_image_patches\n@@space_to_batch_nd\n@@space_to_batch\n@@required_space_to_batch_paddings\n@@batch_to_space_nd\n@@batch_to_space\n@@space_to_depth\n@@depth_to_space\n@@gather\n@@gather_nd\n@@unique_with_counts\n@@scatter_nd\n@@dynamic_partition\n@@dynamic_stitch\n@@boolean_mask\n@@one_hot\n@@sequence_mask\n@@dequantize\n@@quantize\n@@quantize_v2\n@@quantized_concat\n@@setdiff1d\n@@guarantee_const\n@@fake_quant_with_min_max_args\n@@fake_quant_with_min_max_args_gradient\n@@fake_quant_with_min_max_vars\n@@fake_quant_with_min_max_vars_gradient\n@@fake_quant_with_min_max_vars_per_channel\n@@fake_quant_with_min_max_vars_per_channel_gradient\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\n# 'Constant' gets imported in the module 'array_ops'.\nfrom tensorflow.python.framework.constant_op import constant\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_array_ops import *\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n# pylint: enable=wildcard-import\n\n# Used for slicing to specify a new 1 size dimension\nnewaxis = None\ntf_export(\"newaxis\").export_constant(__name__, \"newaxis\")\n\n# We override the 'slice' for the \"slice\" op, so we keep python's\n# existing 'slice' for later use in this module.\n_BaseSlice = slice\n\n\n@tf_export(\"identity\")\ndef identity(input, name=None): # pylint: disable=redefined-builtin\n r\"\"\"Return a tensor with the same shape and contents as input.\n\n Args:\n input: A `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `input`.\n \"\"\"\n if context.in_graph_mode():\n return gen_array_ops.identity(input, name=name)\n else:\n input = ops.convert_to_tensor(input)\n in_device = input.device\n # TODO(ashankar): Does 'identity' need to invoke execution callbacks?\n if context.context().device_name != in_device:\n return input._copy() # pylint: disable=protected-access\n return input\n\n\n# pylint: disable=redefined-builtin,protected-access\n@tf_export(\"expand_dims\")\ndef expand_dims(input, axis=None, name=None, dim=None):\n \"\"\"Inserts a dimension of 1 into a tensor's shape.\n\n Given a tensor `input`, this operation inserts a dimension of 1 at the\n dimension index `axis` of `input`'s shape. The dimension index `axis` starts\n at zero; if you specify a negative number for `axis` it is counted backward\n from the end.\n\n This operation is useful if you want to add a batch dimension to a single\n element. For example, if you have a single image of shape `[height, width,\n channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,\n which will make the shape `[1, height, width, channels]`.\n\n Other examples:\n\n ```python\n # 't' is a tensor of shape [2]\n tf.shape(tf.expand_dims(t, 0)) # [1, 2]\n tf.shape(tf.expand_dims(t, 1)) # [2, 1]\n tf.shape(tf.expand_dims(t, -1)) # [2, 1]\n\n # 't2' is a tensor of shape [2, 3, 5]\n tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]\n tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]\n tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]\n ```\n\n This operation requires that:\n\n `-1-input.dims() <= dim <= input.dims()`\n\n This operation is related to `squeeze()`, which removes dimensions of\n size 1.\n\n Args:\n input: A `Tensor`.\n axis: 0-D (scalar). Specifies the dimension index at which to\n expand the shape of `input`. Must be in the range\n `[-rank(input) - 1, rank(input)]`.\n name: The name of the output `Tensor`.\n dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.\n\n Returns:\n A `Tensor` with the same data as `input`, but its shape has an additional\n dimension of size 1 added.\n\n Raises:\n ValueError: if both `dim` and `axis` are specified.\n \"\"\"\n # TODO(aselle): Remove argument dim\n if dim is not None:\n if axis is not None:\n raise ValueError(\"can't specify both 'dim' and 'axis'\")\n axis = dim\n return gen_array_ops._expand_dims(input, axis, name)\n\n\n# pylint: enable=redefined-builtin,protected-access\n\n\n# Aliases for some automatically-generated names.\n# pylint: disable=protected-access\[email protected](\n \"2016-11-30\",\n \"This op will be removed after the deprecation date. 
\"\n \"Please switch to tf.setdiff1d().\")\ndef listdiff(x, y, out_idx=None, name=None):\n return gen_array_ops._list_diff(x, y, out_idx, name)\n\n\nlistdiff.__doc__ = gen_array_ops._list_diff.__doc__ + \"\\n\" + listdiff.__doc__\n\n# pylint: enable=protected-access\n\n\n# pylint: disable=undefined-variable,protected-access\n@tf_export(\"setdiff1d\")\ndef setdiff1d(x, y, index_dtype=dtypes.int32, name=None):\n return gen_array_ops._list_diff(x, y, index_dtype, name)\n\n\nsetdiff1d.__doc__ = gen_array_ops._list_diff.__doc__\n\n# pylint: enable=protected-access\n\n\n@tf_export(\"broadcast_dynamic_shape\")\ndef broadcast_dynamic_shape(shape_x, shape_y):\n # pylint: disable=protected-access\n \"\"\"Returns the broadcasted dynamic shape between `shape_x` and `shape_y`.\n\n Args:\n shape_x: A rank 1 integer `Tensor`, representing the shape of x.\n shape_y: A rank 1 integer `Tensor`, representing the shape of y.\n\n Returns:\n A rank 1 integer `Tensor` representing the broadcasted shape.\n \"\"\"\n return gen_array_ops._broadcast_args(shape_x, shape_y)\n # pylint: enable=protected-access\n\n\n@tf_export(\"broadcast_static_shape\")\ndef broadcast_static_shape(shape_x, shape_y):\n \"\"\"Returns the broadcasted static shape between `shape_x` and `shape_y`.\n\n Args:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\n Returns:\n A `TensorShape` representing the broadcasted shape.\n\n Raises:\n ValueError: If the two shapes can not be broadcasted.\n \"\"\"\n return common_shapes.broadcast_shape(shape_x, shape_y)\n\n\n@tf_export(\"shape\")\ndef shape(input, name=None, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n\n This operation returns a 1-D integer tensor representing the shape of `input`.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.shape(t) # [2, 2, 3]\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified output type of the operation\n (`int32` or `int64`). Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`.\n \"\"\"\n return shape_internal(input, name, optimize=True, out_type=out_type)\n\n\ndef shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the shape of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the shape as a constant when possible.\n out_type: (Optional) The specified output type of the operation\n (`int32` or `int64`). Defaults to tf.int32.\n\n Returns:\n A `Tensor` of type `out_type`.\n\n \"\"\"\n with ops.name_scope(name, \"Shape\", [input]) as name:\n if isinstance(input, (sparse_tensor.SparseTensor,\n sparse_tensor.SparseTensorValue)):\n return gen_math_ops.cast(input.dense_shape, out_type)\n else:\n if context.in_graph_mode():\n input_tensor = ops.convert_to_tensor(input)\n input_shape = input_tensor.get_shape()\n if optimize and input_shape.is_fully_defined():\n return constant(input_shape.as_list(), out_type, name=name)\n return gen_array_ops.shape(input, name=name, out_type=out_type)\n\n\n@tf_export(\"shape_n\")\ndef shape_n(input, out_type=dtypes.int32, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns shape of tensors.\n\n Args:\n input: A list of at least 1 `Tensor` object with the same type.\n out_type: The specified output type of the operation\n (`int32` or `int64`). 
Defaults to `tf.int32`(optional).\n name: A name for the operation (optional).\n\n Returns:\n A list with the same length as `input` of `Tensor` objects with\n type `out_type`.\n \"\"\"\n\n output = gen_array_ops.shape_n(input, out_type=out_type, name=name)\n if context.in_graph_mode():\n for i, input_tensor in enumerate(input):\n input_tensor = ops.convert_to_tensor(input_tensor)\n input_shape = input_tensor.get_shape()\n if input_shape.is_fully_defined():\n output[i] = constant(\n input_shape.as_list(), dtype=out_type, name=name)\n return output\n\n\n@tf_export(\"size\")\ndef size(input, name=None, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the size of a tensor.\n\n Returns a 0-D `Tensor` representing the number of elements in `input`\n of type `out_type`. Defaults to tf.int32.\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.size(t) # 12\n ```\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n out_type: (Optional) The specified non-quantized numeric output type\n of the operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n\n @compatibility(numpy)\n Equivalent to np.size()\n @end_compatibility\n \"\"\"\n return size_internal(input, name, optimize=True, out_type=out_type)\n\n\ndef size_internal(input, name=None, optimize=True, out_type=dtypes.int32):\n # pylint: disable=redefined-builtin,protected-access\n \"\"\"Returns the size of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the size as a constant when possible.\n out_type: (Optional) The specified non-quantized numeric output type\n of the operation. Defaults to `tf.int32`.\n\n Returns:\n A `Tensor` of type `out_type`. Defaults to `tf.int32`.\n \"\"\"\n if context.in_eager_mode() and not isinstance(\n input, (sparse_tensor.SparseTensor,\n sparse_tensor.SparseTensorValue)):\n size_ = 1\n for dim in ops.convert_to_tensor(input)._shape_tuple(): # pylint: disable=protected-access\n size_ *= dim\n return size_\n with ops.name_scope(name, \"Size\", [input]) as name:\n if isinstance(input, (sparse_tensor.SparseTensor,\n sparse_tensor.SparseTensorValue)):\n return gen_math_ops._prod(\n gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)\n else:\n input_tensor = ops.convert_to_tensor(input)\n input_shape = input_tensor.get_shape()\n if optimize and input_shape.is_fully_defined():\n return constant(input_shape.num_elements(), out_type, name=name)\n return gen_array_ops.size(input, name=name, out_type=out_type)\n\n\n@tf_export(\"rank\")\ndef rank(input, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the rank of a tensor.\n\n Returns a 0-D `int32` `Tensor` representing the rank of `input`.\n\n For example:\n\n ```python\n # shape of tensor 't' is [2, 2, 3]\n t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\n tf.rank(t) # 3\n ```\n\n **Note**: The rank of a tensor is not the same as the rank of a matrix. The\n rank of a tensor is the number of indices required to uniquely select each\n element of the tensor. 
Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `int32`.\n\n @compatibility(numpy)\n Equivalent to np.ndim\n @end_compatibility\n \"\"\"\n return rank_internal(input, name, optimize=True)\n\n\ndef rank_internal(input, name=None, optimize=True):\n # pylint: disable=redefined-builtin\n \"\"\"Returns the rank of a tensor.\n\n Args:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n optimize: if true, encode the rank as a constant when possible.\n\n Returns:\n A `Tensor` of type `int32`.\n \"\"\"\n with ops.name_scope(name, \"Rank\", [input]) as name:\n if isinstance(input, (sparse_tensor.SparseTensor,\n sparse_tensor.SparseTensorValue)):\n return gen_array_ops.size(input.dense_shape, name=name)\n else:\n input_tensor = ops.convert_to_tensor(input)\n input_shape = input_tensor.get_shape()\n if optimize and input_shape.ndims is not None:\n return constant(input_shape.ndims, dtypes.int32, name=name)\n return gen_array_ops.rank(input, name=name)\n\n\ndef _slice_helper(tensor, slice_spec, var=None):\n \"\"\"Overload for Tensor.__getitem__.\n\n This operation extracts the specified region from the tensor.\n The notation is similar to NumPy with the restriction that\n currently only support basic indexing. That means that\n using a non-scalar tensor as input is not currently allowed.\n\n Some useful examples:\n\n ```python\n # strip leading and trailing 2 elements\n foo = tf.constant([1,2,3,4,5,6])\n print(foo[2:-2].eval()) # => [3,4]\n\n # skip every row and reverse every column\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]\n\n # Use scalar tensors as indices on both dimensions\n print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3\n\n # Insert another dimension\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]\n print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],\n [[7],[8],[9]]]\n\n # Ellipses (3 equivalent operations)\n foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])\n print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]\n ```\n\n Notes:\n - `tf.newaxis` is `None` as in NumPy.\n - An implicit ellipsis is placed at the end of the `slice_spec`\n - NumPy advanced indexing is currently not supported.\n\n Args:\n tensor: An ops.Tensor object.\n slice_spec: The arguments to Tensor.__getitem__.\n var: In the case of variable slice assignment, the Variable\n object to slice (i.e. 
tensor is the read-only view of this\n variable).\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, or Ellipsis.\n \"\"\"\n\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n\n begin, end, strides = [], [], []\n index = 0\n\n new_axis_mask, shrink_axis_mask = 0, 0\n begin_mask, end_mask = 0, 0\n ellipsis_mask = 0\n for s in slice_spec:\n if isinstance(s, _BaseSlice):\n # python doesn't always use None when constructing ranges\n # for example a[:] gives slice(None,sys.maxsize,None)\n # whereas a[::1] gives slice(None,None,None)\n if s.start is not None and s.start is not sys.maxsize:\n begin.append(s.start)\n else:\n begin.append(0)\n begin_mask |= (1 << index)\n if s.stop is not None and s.stop != sys.maxsize:\n end.append(s.stop)\n else:\n end.append(0)\n end_mask |= (1 << index)\n if s.step is not None:\n strides.append(s.step)\n else:\n strides.append(1)\n elif s is Ellipsis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n ellipsis_mask |= (1 << index)\n elif s is newaxis:\n begin.append(0)\n end.append(0)\n strides.append(1)\n new_axis_mask |= (1 << index)\n else:\n begin.append(s)\n end.append(s + 1)\n strides.append(1)\n shrink_axis_mask |= (1 << index)\n index += 1\n\n # stack possibly involves no tensors, so we must use op_scope correct graph.\n with ops.name_scope(None, \"strided_slice\",\n [tensor] + begin + end + strides) as name:\n if begin:\n packed_begin, packed_end, packed_strides = (stack(begin), stack(end),\n stack(strides))\n if (packed_begin.dtype == dtypes.int64 or\n packed_end.dtype == dtypes.int64 or\n packed_strides.dtype == dtypes.int64):\n if packed_begin.dtype != dtypes.int64:\n packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)\n if packed_end.dtype != dtypes.int64:\n packed_end = gen_math_ops.cast(packed_end, dtypes.int64)\n if packed_strides.dtype != dtypes.int64:\n packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)\n else:\n var_empty = constant([], dtype=dtypes.int32)\n packed_begin = packed_end = packed_strides = var_empty\n return strided_slice(\n tensor,\n packed_begin,\n packed_end,\n packed_strides,\n begin_mask=begin_mask,\n end_mask=end_mask,\n shrink_axis_mask=shrink_axis_mask,\n new_axis_mask=new_axis_mask,\n ellipsis_mask=ellipsis_mask,\n var=var,\n name=name)\n\n\n# pylint: disable=undefined-variable,protected-access,redefined-outer-name\n@tf_export(\"slice\")\ndef slice(input_, begin, size, name=None):\n # pylint: disable=redefined-builtin\n \"\"\"Extracts a slice from a tensor.\n\n This operation extracts a slice of size `size` from a tensor `input` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input`. In other\n words, `begin[i]` is the offset into the 'i'th dimension of `input` that you\n want to slice from.\n\n Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to\n perform slices, as it allows you to write `foo[3:7, :-2]` instead of\n `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. 
In other words, this is equivalent to setting:\n\n `size[i] = input.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]\n tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],\n # [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n return gen_array_ops._slice(input_, begin, size, name=name)\n\n\n# pylint: disable=invalid-name\n@tf_export(\"strided_slice\")\ndef strided_slice(input_,\n begin,\n end,\n strides=None,\n begin_mask=0,\n end_mask=0,\n ellipsis_mask=0,\n new_axis_mask=0,\n shrink_axis_mask=0,\n var=None,\n name=None):\n \"\"\"Extracts a strided slice of a tensor (generalized python array indexing).\n\n **Instead of calling this op directly most users will want to use the\n NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which\n is supported via @{tf.Tensor.__getitem__} and @{tf.Variable.__getitem__}.**\n The interface of this op is a low-level encoding of the slicing syntax.\n\n Roughly speaking, this op extracts a slice of size `(end-begin)/stride`\n from the given `input_` tensor. Starting at the location specified by `begin`\n the slice continues by adding `stride` to the index until all dimensions are\n not less than `end`.\n Note that a stride can be negative, which causes a reverse slice.\n\n Given a Python slice `input[spec0, spec1, ..., specn]`,\n this function will be called as follows.\n\n `begin`, `end`, and `strides` will be vectors of length n.\n n in general is not equal to the rank of the `input_` tensor.\n\n In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,\n `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to\n the ith spec.\n\n If the ith bit of `begin_mask` is set, `begin[i]` is ignored and\n the fullest possible range in that dimension is used instead.\n `end_mask` works analogously, except with the end range.\n\n `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.\n `foo[::-1]` reverses a tensor with shape 8.\n\n If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions\n as needed will be inserted between other dimensions. Only one\n non-zero bit is allowed in `ellipsis_mask`.\n\n For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is\n equivalent to `foo[3:5,:,:,4:5]` and\n `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.\n\n If the ith bit of `new_axis_mask` is set, then `begin`,\n `end`, and `stride` are ignored and a new length 1 dimension is\n added at this point in the output tensor.\n\n For example,\n `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.\n\n If the ith bit of `shrink_axis_mask` is set, it implies that the ith\n specification shrinks the dimensionality by 1. `begin[i]`, `end[i]` and\n `strides[i]` must imply a slice of size 1 in the dimension. 
For example in\n Python one might do `foo[:, 3, :]` which would result in\n `shrink_axis_mask` equal to 2.\n\n\n NOTE: `begin` and `end` are zero-indexed.\n `strides` entries must be non-zero.\n\n\n ```python\n t = tf.constant([[[1, 1, 1], [2, 2, 2]],\n [[3, 3, 3], [4, 4, 4]],\n [[5, 5, 5], [6, 6, 6]]])\n tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]\n tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],\n # [4, 4, 4]]]\n tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],\n # [3, 3, 3]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n end: An `int32` or `int64` `Tensor`.\n strides: An `int32` or `int64` `Tensor`.\n begin_mask: An `int32` mask.\n end_mask: An `int32` mask.\n ellipsis_mask: An `int32` mask.\n new_axis_mask: An `int32` mask.\n shrink_axis_mask: An `int32` mask.\n var: The variable corresponding to `input_` or None\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n\n if strides is None:\n strides = ones_like(begin)\n\n op = gen_array_ops.strided_slice(\n input=input_,\n begin=begin,\n end=end,\n strides=strides,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n parent_name = name\n\n def assign(val, name=None):\n \"\"\"Closure that holds all the arguments to create an assignment.\"\"\"\n\n if var is None:\n raise ValueError(\"Sliced assignment is only supported for variables\")\n\n if name is None:\n name = parent_name + \"_assign\"\n\n return var._strided_slice_assign(\n begin=begin,\n end=end,\n strides=strides,\n value=val,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n if context.in_graph_mode():\n # TODO(apassos) In eager mode assignment will be done by overriding\n # __setitem__ instead.\n op.assign = assign\n return op\n\n\ndef _SliceHelperVar(var, slice_spec):\n \"\"\"Creates a slice helper object given a variable.\n\n This allows creating a sub-tensor from part of the current contents\n of a variable. See ${tf.Tensor$`Tensor.__getitem__`}\n for detailed examples of slicing.\n\n This function in addition also allows assignment to a sliced range.\n This is similar to `__setitem__` functionality in Python. However,\n the syntax is different so that the user can capture the assignment\n operation for grouping or passing to `sess.run()`.\n For example,\n\n ```python\n import tensorflow as tf\n A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]\n\n op = A[:2,:2].assign(22. * tf.ones((2, 2)))\n print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]\n ```\n\n Note that assignments currently do not support NumPy broadcasting\n semantics.\n\n Args:\n var: An `ops.Variable` object.\n slice_spec: The arguments to `Tensor.__getitem__`.\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n As an operator. 
The operator also has a `assign()` method\n that can be used to generate an assignment operator.\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, or Ellipsis.\n\n \"\"\"\n\n return _slice_helper(var._AsTensor(), slice_spec, var)\n\n\nops.Tensor._override_operator(\"__getitem__\", _slice_helper)\n\n\n@tf_export(\"parallel_stack\")\ndef parallel_stack(values, name=\"parallel_stack\"):\n \"\"\"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.\n\n Requires that the shape of inputs be known at graph construction time.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the first dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`\n tensor will have the shape `(N, A, B, C)`.\n\n For example:\n\n ```python\n x = tf.constant([1, 4])\n y = tf.constant([2, 5])\n z = tf.constant([3, 6])\n tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]\n ```\n\n The difference between `stack` and `parallel_stack` is that `stack` requires\n all the inputs be computed before the operation will begin but doesn't require\n that the input shapes be known during graph construction.\n\n `parallel_stack` will copy pieces of the input into the output as they become\n available, in some situations this can provide a performance benefit.\n\n Unlike `stack`, `parallel_stack` does NOT support backpropagation.\n\n This is the opposite of unstack. The numpy equivalent is\n\n tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n \"\"\"\n with ops.name_scope(name):\n value_t = ops.convert_to_tensor(values[0])\n value_shape = ops.convert_to_tensor(value_t).get_shape()\n\n output_shape = tensor_shape.TensorShape([len(values)])\n output_shape = output_shape.concatenate(value_shape)\n # expand_dims converts concat to stack.\n return gen_array_ops._parallel_concat(\n [expand_dims(value, 0) for value in values], shape=output_shape)\n\n\n@tf_export(\"stack\")\ndef stack(values, axis=0, name=\"stack\"):\n \"\"\"Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n Packs the list of tensors in `values` into a tensor with rank one higher than\n each tensor in `values`, by packing them along the `axis` dimension.\n Given a list of length `N` of tensors of shape `(A, B, C)`;\n\n if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\n if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\n Etc.\n\n For example:\n\n ```python\n x = tf.constant([1, 4])\n y = tf.constant([2, 5])\n z = tf.constant([3, 6])\n tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)\n tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]\n ```\n\n This is the opposite of unstack. The numpy equivalent is\n\n ```python\n tf.stack([x, y, z]) = np.stack([x, y, z])\n ```\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n axis: An `int`. The axis to stack along. 
Defaults to the first dimension.\n Negative values wrap around, so the valid range is `[-(R+1), R+1)`.\n name: A name for this operation (optional).\n\n Returns:\n output: A stacked `Tensor` with the same type as `values`.\n\n Raises:\n ValueError: If `axis` is out of the range [-(R+1), R+1).\n \"\"\"\n if axis == 0:\n try:\n # If the input is a constant list, it can be converted to a constant op\n return ops.convert_to_tensor(values, name=name)\n except (TypeError, ValueError):\n pass # Input list contains non-constant tensors\n\n value_shape = ops.convert_to_tensor(values[0], name=name).get_shape()\n if value_shape.ndims is not None:\n expanded_num_dims = value_shape.ndims + 1\n if axis < -expanded_num_dims or axis >= expanded_num_dims:\n raise ValueError(\"axis = %d not in [%d, %d)\" % (axis, -expanded_num_dims,\n expanded_num_dims))\n\n return gen_array_ops._pack(values, axis=axis, name=name)\n\n\n# pylint: disable=invalid-name\ndef _autopacking_helper(list_or_tuple, dtype, name):\n \"\"\"Converts the given list or tuple to a tensor by packing.\n\n Args:\n list_or_tuple: A (possibly nested) list or tuple containing a tensor.\n dtype: The element type of the returned tensor.\n name: A name for the returned tensor.\n\n Returns:\n A `tf.Tensor` with value equivalent to `list_or_tuple`.\n \"\"\"\n must_pack = False\n converted_elems = []\n with ops.name_scope(name) as scope:\n for i, elem in enumerate(list_or_tuple):\n if ops.is_dense_tensor_like(elem):\n if dtype is not None and elem.dtype.base_dtype != dtype:\n raise TypeError(\"Cannot convert a list containing a tensor of dtype \"\n \"%s to %s (Tensor is: %r)\" % (elem.dtype, dtype,\n elem))\n converted_elems.append(elem)\n must_pack = True\n elif isinstance(elem, (list, tuple)):\n converted_elem = _autopacking_helper(elem, dtype, str(i))\n if ops.is_dense_tensor_like(converted_elem):\n must_pack = True\n converted_elems.append(converted_elem)\n else:\n converted_elems.append(elem)\n if must_pack:\n elems_as_tensors = []\n for i, elem in enumerate(converted_elems):\n if ops.is_dense_tensor_like(elem):\n elems_as_tensors.append(elem)\n else:\n # NOTE(mrry): This is inefficient, but it enables us to\n # handle the case where the list arguments are other\n # convertible-to-tensor types, such as numpy arrays.\n elems_as_tensors.append(\n constant_op.constant(elem, dtype=dtype, name=str(i)))\n return gen_array_ops._pack(elems_as_tensors, name=scope)\n else:\n return converted_elems\n\n\ndef _get_dtype_from_nested_lists(list_or_tuple):\n \"\"\"Returns the dtype of any tensor-like object in `list_or_tuple`, if found.\n\n Args:\n list_or_tuple: A list or tuple representing an object that can be\n converted to a `tf.Tensor`.\n\n Returns:\n The dtype of any tensor-like object in `list_or_tuple`, or `None` if no\n such object exists.\n \"\"\"\n for elem in list_or_tuple:\n if ops.is_dense_tensor_like(elem):\n return elem.dtype.base_dtype\n elif isinstance(elem, (list, tuple)):\n maybe_dtype = _get_dtype_from_nested_lists(elem)\n if maybe_dtype is not None:\n return maybe_dtype\n return None\n\n\ndef _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):\n \"\"\"Tensor conversion function that automatically packs arguments.\"\"\"\n if as_ref:\n return NotImplemented\n inferred_dtype = _get_dtype_from_nested_lists(v)\n if inferred_dtype is None:\n # We did not find any tensor-like objects in the nested lists, so defer to\n # other conversion functions.\n return NotImplemented\n if dtype is not None and dtype != inferred_dtype:\n 
return NotImplemented\n return _autopacking_helper(v, inferred_dtype, name or \"packed\")\n\n\n# pylint: enable=invalid-name\n\n# NOTE: Register this conversion function to run *before* one that\n# assumes every element is a value.\nops.register_tensor_conversion_function((list, tuple),\n _autopacking_conversion_function, 99)\n\n\n@tf_export(\"unstack\")\ndef unstack(value, num=None, axis=0, name=\"unstack\"):\n \"\"\"Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.\n If `num` is not specified (the default), it is inferred from `value`'s shape.\n If `value.shape[axis]` is not known, `ValueError` is raised.\n\n For example, given a tensor of shape `(A, B, C, D)`;\n\n If `axis == 0` then the i'th tensor in `output` is the slice\n `value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.\n (Note that the dimension unpacked along is gone, unlike `split`).\n\n If `axis == 1` then the i'th tensor in `output` is the slice\n `value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.\n Etc.\n\n This is the opposite of stack. The numpy equivalent is\n\n tf.unstack(x, n) = np.unstack(x)\n\n Args:\n value: A rank `R > 0` `Tensor` to be unstacked.\n num: An `int`. The length of the dimension `axis`. Automatically inferred\n if `None` (the default).\n axis: An `int`. The axis to unstack along. Defaults to the first\n dimension. Negative values wrap around, so the valid range is `[-R, R)`.\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unstacked from `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n ValueError: If `axis` is out of the range [-R, R).\n \"\"\"\n if num is None:\n value = ops.convert_to_tensor(value)\n value_shape = value.get_shape()\n if value_shape.ndims is not None:\n if axis < -value_shape.ndims or axis >= value_shape.ndims:\n raise ValueError(\"axis = %d not in [%d, %d)\" %\n (axis, -value_shape.ndims, value_shape.ndims))\n num = value_shape[axis].value\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % value_shape)\n return gen_array_ops._unpack(value, num=num, axis=axis, name=name)\n\n\n@tf_export(\"concat\")\ndef concat(values, axis, name=\"concat\"):\n \"\"\"Concatenates tensors along one dimension.\n\n Concatenates the list of tensors `values` along dimension `axis`. If\n `values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... Raxis, ...Dn]\n\n where\n\n Raxis = sum(Daxis(i))\n\n That is, the data from the input tensors is joined along the `axis`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `axis` must be equal.\n\n For example:\n\n ```python\n t1 = [[1, 2, 3], [4, 5, 6]]\n t2 = [[7, 8, 9], [10, 11, 12]]\n tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]\n\n # tensor t3 with shape [2, 3]\n # tensor t4 with shape [2, 3]\n tf.shape(tf.concat([t3, t4], 0)) # [4, 3]\n tf.shape(tf.concat([t3, t4], 1)) # [2, 6]\n ```\n As in Python, the `axis` could also be negative numbers. 
Negative `axis`\n are interpreted as counting from the end of the rank, i.e.,\n `axis + rank(values)`-th dimension.\n\n For example:\n\n ```python\n t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]\n t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]\n tf.concat([t1, t2], -1)\n ```\n\n would produce:\n\n ```python\n [[[ 1, 2, 7, 4],\n [ 2, 3, 8, 4]],\n\n [[ 4, 4, 2, 10],\n [ 5, 3, 15, 11]]]\n ```\n\n Note: If you are concatenating along a new axis consider using stack.\n E.g.\n\n ```python\n tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)\n ```\n\n can be rewritten as\n\n ```python\n tf.stack(tensors, axis=axis)\n ```\n\n Args:\n values: A list of `Tensor` objects or a single `Tensor`.\n axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be\n in the range `[-rank(values), rank(values))`. As in Python, indexing\n for axis is 0-based. Positive axis in the rage of\n `[0, rank(values))` refers to `axis`-th dimension. And negative axis\n refers to `axis + rank(values)`-th dimension.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n \"\"\"\n if not isinstance(values, (list, tuple)):\n values = [values]\n # TODO(mrry): Change to return values?\n if len(values) == 1: # Degenerate case of one tensor.\n # Make a throwaway call to convert_to_tensor to make sure\n # that axis is of the correct type, and make sure that\n # the returned tensor is a scalar.\n # TODO(keveman): Implement a standalone type and shape checker.\n with ops.name_scope(name) as scope:\n ops.convert_to_tensor(\n axis, name=\"concat_dim\",\n dtype=dtypes.int32).get_shape().assert_is_compatible_with(\n tensor_shape.scalar())\n return identity(values[0], name=scope)\n return gen_array_ops._concat_v2(values=values, axis=axis, name=name)\n\n\n@tf_export(\"boolean_mask\")\ndef boolean_mask(tensor, mask, name=\"boolean_mask\", axis=None):\n \"\"\"Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.\n\n ```python\n # 1-D example\n tensor = [0, 1, 2, 3]\n mask = np.array([True, False, True, False])\n boolean_mask(tensor, mask) # [0, 2]\n ```\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n The `axis` could be used with `mask` to indicate the axis to mask from.\n In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match\n the first `axis + dim(mask)` dimensions of `tensor`'s shape.\n\n Args:\n tensor: N-D tensor.\n mask: K-D boolean tensor, K <= N and K must be known statically.\n name: A name for this operation (optional).\n axis: A 0-D int Tensor representing the axis in `tensor` to mask from.\n By default, axis is 0 which will mask from the first dimension. 
Otherwise\n K + axis <= N.\n\n Returns:\n (N-K+1)-dimensional tensor populated by entries in `tensor` corresponding\n to `True` values in `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n tensor = [[1, 2], [3, 4], [5, 6]]\n mask = np.array([True, False, True])\n boolean_mask(tensor, mask) # [[1, 2], [5, 6]]\n ```\n \"\"\"\n\n def _apply_mask_1d(reshaped_tensor, mask, axis=None):\n \"\"\"Mask tensor along dimension 0 with a 1-D mask.\"\"\"\n indices = squeeze(where(mask), squeeze_dims=[1])\n return gather(reshaped_tensor, indices, axis=axis)\n\n with ops.name_scope(name, values=[tensor, mask]):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n mask = ops.convert_to_tensor(mask, name=\"mask\")\n\n shape_mask = mask.get_shape()\n ndims_mask = shape_mask.ndims\n shape_tensor = tensor.get_shape()\n if ndims_mask == 0:\n raise ValueError(\"mask cannot be scalar.\")\n if ndims_mask is None:\n raise ValueError(\n \"Number of mask dimensions must be specified, even if some dimensions\"\n \" are None. E.g. shape=[None] is ok, but shape=None is not.\")\n axis = 0 if axis is None else axis\n shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)\n\n leading_size = gen_math_ops._prod(\n shape(tensor)[axis:axis + ndims_mask], [0])\n tensor = reshape(tensor,\n concat([\n shape(tensor)[:axis], [leading_size],\n shape(tensor)[axis + ndims_mask:]\n ], 0))\n first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()\n tensor.set_shape(\n tensor_shape.as_shape(shape_tensor[:axis]).concatenate([first_dim])\n .concatenate(shape_tensor[axis + ndims_mask:]))\n\n mask = reshape(mask, [-1])\n return _apply_mask_1d(tensor, mask, axis)\n\n\n@tf_export(\"sparse_mask\")\ndef sparse_mask(a, mask_indices, name=None):\n \"\"\"Masks elements of `IndexedSlices`.\n\n Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that\n contains a subset of the slices of `a`. 
Only the slices at indices not\n specified in `mask_indices` are returned.\n\n This is useful when you need to extract a subset of slices in an\n `IndexedSlices` object.\n\n For example:\n\n ```python\n # `a` contains slices at indices [12, 26, 37, 45] from a large tensor\n # with shape [1000, 10]\n a.indices # [12, 26, 37, 45]\n tf.shape(a.values) # [4, 10]\n\n # `b` will be the subset of `a` slices at its second and third indices, so\n # we want to mask its first and last indices (which are at absolute\n # indices 12, 45)\n b = tf.sparse_mask(a, [12, 45])\n\n b.indices # [26, 37]\n tf.shape(b.values) # [2, 10]\n ```\n\n Args:\n a: An `IndexedSlices` instance.\n mask_indices: Indices of elements to mask.\n name: A name for the operation (optional).\n\n Returns:\n The masked `IndexedSlices` instance.\n \"\"\"\n with ops.name_scope(name, \"sparse_mask\", [a, mask_indices]) as name:\n indices = a.indices\n out_indices, to_gather = setdiff1d(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return ops.IndexedSlices(out_values, out_indices, a.dense_shape)\n\n\n@tf_export(\"unique\")\ndef unique(x, out_idx=dtypes.int32, name=None):\n # TODO(yongtang): switch to v2 once API deprecation\n # period (3 weeks) pass.\n # TODO(yongtang): The documentation should also\n # be updated when switch to v2.\n return gen_array_ops._unique(x, out_idx, name)\n\n\nunique.__doc__ = gen_array_ops._unique.__doc__\n\n\n@tf_export(\"split\")\ndef split(value, num_or_size_splits, axis=0, num=None, name=\"split\"):\n \"\"\"Splits a tensor into sub tensors.\n\n If `num_or_size_splits` is an integer type, `num_split`, then splits `value`\n along dimension `axis` into `num_split` smaller tensors.\n Requires that `num_split` evenly divides `value.shape[axis]`.\n\n If `num_or_size_splits` is not an integer type, it is presumed to be a Tensor\n `size_splits`, then splits `value` into `len(size_splits)` pieces. The shape\n of the `i`-th piece has the same size as the `value` except along dimension\n `axis` where the size is `size_splits[i]`.\n\n For example:\n\n ```python\n # 'value' is a tensor with shape [5, 30]\n # Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1\n split0, split1, split2 = tf.split(value, [4, 15, 11], 1)\n tf.shape(split0) # [5, 4]\n tf.shape(split1) # [5, 15]\n tf.shape(split2) # [5, 11]\n # Split 'value' into 3 tensors along dimension 1\n split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)\n tf.shape(split0) # [5, 10]\n ```\n\n Args:\n value: The `Tensor` to split.\n num_or_size_splits: Either a 0-D integer `Tensor` indicating the number of\n splits along split_dim or a 1-D integer `Tensor` containing\n the sizes of each output tensor along split_dim. If a scalar then it must\n evenly divide `value.shape[axis]`; otherwise the sum of sizes along the\n split dimension must match that of the `value`.\n axis: A 0-D `int32` `Tensor`. The dimension along which to split.\n Must be in the range `[-rank(value), rank(value))`. 
Defaults to 0.\n num: Optional, used to specify the number of outputs when it cannot be\n inferred from the shape of `size_splits`.\n name: A name for the operation (optional).\n\n Returns:\n if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`\n objects; if `num_or_size_splits` is a 1-D Tensor returns\n `num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting\n `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n \"\"\"\n size_splits = ops.convert_to_tensor(num_or_size_splits)\n if size_splits._rank() == 0 and size_splits.dtype.is_integer:\n return gen_array_ops._split(\n axis=axis, num_split=num_or_size_splits, value=value, name=name)\n\n if num is None:\n num = size_splits._shape_tuple()[0]\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % num_or_size_splits)\n\n return gen_array_ops._split_v(\n value=value,\n size_splits=size_splits,\n axis=axis,\n num_split=num,\n name=name)\n\n\n@tf_export(\"transpose\")\ndef transpose(a, perm=None, name=\"transpose\", conjugate=False):\n \"\"\"Transposes `a`. Permutes the dimensions according to `perm`.\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors. If conjugate is True and\n `a.dtype` is either `complex64` or `complex128` then the values of `a`\n are conjugated and transposed.\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, so `transpose` returns a new tensor with\n the items permuted.\n @end_compatibility\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.transpose(x) # [[1, 4]\n # [2, 5]\n # [3, 6]]\n\n # Equivalently\n tf.transpose(x, perm=[1, 0]) # [[1, 4]\n # [2, 5]\n # [3, 6]]\n\n # If x is complex, setting conjugate=True gives the conjugate transpose\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n # 'perm' is more useful for n-dimensional tensors, for n > 2\n x = tf.constant([[[ 1, 2, 3],\n [ 4, 5, 6]],\n [[ 7, 8, 9],\n [10, 11, 12]]])\n\n # Take the transpose of the matrices in dimension-0\n # (this common operation has a shorthand `matrix_transpose`)\n tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],\n # [2, 5],\n # [3, 6]],\n # [[7, 10],\n # [8, 11],\n # [9, 12]]]\n ```\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`.\n name: A name for the operation (optional).\n conjugate: Optional bool. 
Setting it to `True` is mathematically equivalent\n to tf.conj(tf.transpose(input)).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n with ops.name_scope(name, \"transpose\", [a]) as name:\n transpose_fn = (\n gen_array_ops._conjugate_transpose\n if (conjugate and a.dtype.is_complex) else gen_array_ops.transpose)\n if perm is None:\n rank = gen_array_ops.rank(a)\n perm = (rank - 1) - gen_math_ops._range(0, rank, 1)\n ret = transpose_fn(a, perm, name=name)\n # NOTE(mrry): Setting the shape explicitly because\n # reverse is not handled by the shape function.\n if context.in_graph_mode():\n input_shape = ret.op.inputs[0].get_shape().dims\n if input_shape is not None:\n ret.set_shape(input_shape[::-1])\n else:\n ret = transpose_fn(a, perm, name=name)\n return ret\n\n\n# pylint: disable=invalid-name\n@tf_export(\"matrix_transpose\", \"linalg.transpose\")\ndef matrix_transpose(a, name=\"matrix_transpose\", conjugate=False):\n \"\"\"Transposes last two dimensions of tensor `a`.\n\n For example:\n\n ```python\n x = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.matrix_transpose(x) # [[1, 4],\n # [2, 5],\n # [3, 6]]\n\n x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\n tf.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n # Matrix with two batch dimensions.\n # x.shape is [1, 2, 3, 4]\n # tf.matrix_transpose(x) is shape [1, 2, 4, 3]\n ```\n\n Note that `tf.matmul` provides kwargs allowing for transpose of arguments.\n This is done with minimal cost, and is preferable to using this function. E.g.\n\n ```python\n # Good! Transpose is taken at minimal additional cost.\n tf.matmul(matrix, b, transpose_b=True)\n\n # Inefficient!\n tf.matmul(matrix, tf.matrix_transpose(b))\n ```\n\n @compatibility(numpy)\n In `numpy` transposes are memory-efficient constant time operations as they\n simply return a new view of the same data with adjusted `strides`.\n\n TensorFlow does not support strides, `matrix_transposes` return a new tensor\n with the items permuted.\n @end_compatibility\n\n Args:\n a: A `Tensor` with `rank >= 2`.\n name: A name for the operation (optional).\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to tf.conj(tf.matrix_transpose(input)).\n\n Returns:\n A transposed batch matrix `Tensor`.\n\n Raises:\n ValueError: If `a` is determined statically to have `rank < 2`.\n \"\"\"\n with ops.name_scope(name, values=[a]):\n a = ops.convert_to_tensor(a, name=\"a\")\n\n # If we know the number of dimensions (statically), we can do two things:\n # 1. Check that `a` is a (batch) matrix.\n # 2. Use a python list for perm. This preserves static shape information\n # and avoids extra computations.\n a_shape = a.get_shape()\n ndims = a_shape.ndims\n if ndims is not None:\n if ndims < 2:\n raise ValueError(\n \"Argument 'a' should be a (batch) matrix, with rank >= 2. 
Found: \"\n \"%s\" % a_shape)\n perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]\n else:\n a_rank = rank(a)\n perm = concat((gen_math_ops._range(0, a_rank - 2, 1),\n [a_rank - 1, a_rank - 2]), 0)\n\n return transpose(a, perm=perm, conjugate=conjugate)\n\n\n# pylint: enable=invalid-name\n\n\n@tf_export(\"zeros\")\ndef zeros(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n For example:\n\n ```python\n tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ```\n\n Args:\n shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type\n `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"zeros\", [shape]) as name:\n if dtype == dtypes.bool:\n zero = False\n elif dtype == dtypes.string:\n zero = \"\"\n else:\n zero = 0\n if not isinstance(shape, ops.Tensor):\n try:\n # Go through tensor shapes to get int64-if-needed semantics\n shape = constant_op._tensor_shape_tensor_conversion_function(\n tensor_shape.TensorShape(shape))\n except (TypeError, ValueError):\n # Happens when shape is a list with tensor elements\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1]) # Ensure it's a vector\n output = fill(shape, constant(zero, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtype\n return output\n\n\n@tf_export(\"zeros_like\")\ndef zeros_like(tensor, dtype=None, name=None, optimize=True):\n \"\"\"Creates a tensor with all elements set to zero.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to zero. Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n ```python\n tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. 
Must be `float16`, `float32`,\n `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,\n `complex64`, `complex128`, `bool` or `string`.\n name: A name for the operation (optional).\n optimize: if true, attempt to statically determine the shape of 'tensor'\n and encode it as a constant.\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.name_scope(name, \"zeros_like\", [tensor]) as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n\n if context.in_eager_mode():\n if dtype is not None and dtype != tensor.dtype:\n return zeros(\n shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)\n with ops.device(tensor.device):\n return gen_array_ops._zeros_like(tensor, name=name)\n\n # For now, variant types must be created via zeros_like; as we need to\n # pass the input variant object to the proper zeros callback.\n\n if (optimize and tensor.shape.is_fully_defined() and\n tensor.dtype != dtypes.variant):\n # We can produce a zeros tensor independent of the value of 'tensor',\n # since the shape is known statically.\n return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)\n\n if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:\n return zeros(\n shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)\n else:\n return gen_array_ops._zeros_like(tensor, name=name)\n\n\n@tf_export(\"ones_like\")\ndef ones_like(tensor, dtype=None, name=None, optimize=True):\n \"\"\"Creates a tensor with all elements set to 1.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to 1. Optionally, you can\n specify a new type (`dtype`) for the returned tensor.\n\n For example:\n\n ```python\n tensor = tf.constant([[1, 2, 3], [4, 5, 6]])\n tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. 
Must be `float32`, `float64`,\n `int8`, `uint8`, `int16`, `uint16`, int32`, `int64`,\n `complex64`, `complex128` or `bool`.\n name: A name for the operation (optional).\n optimize: if true, attempt to statically determine the shape of 'tensor'\n and encode it as a constant.\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.name_scope(name, \"ones_like\", [tensor]) as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n ones_shape = shape_internal(tensor, optimize=optimize)\n if dtype is None:\n dtype = tensor.dtype\n ret = ones(ones_shape, dtype=dtype, name=name)\n if context.in_graph_mode():\n ret.set_shape(tensor.get_shape())\n return ret\n\n\n@tf_export(\"ones\")\ndef ones(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n This operation returns a tensor of type `dtype` with shape `shape` and all\n elements set to 1.\n\n For example:\n\n ```python\n tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type\n `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, \"ones\", [shape]) as name:\n one = True if dtype == dtypes.bool else 1\n if not isinstance(shape, ops.Tensor):\n try:\n # Go through tensor shapes to get int64-if-needed semantics\n shape = constant_op._tensor_shape_tensor_conversion_function(\n tensor_shape.TensorShape(shape))\n except (TypeError, ValueError):\n # Happens when shape is a list with tensor elements\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1]) # Ensure it's a vector\n output = fill(shape, constant(one, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtype\n return output\n\n\n@tf_export(\"placeholder\")\ndef placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a tensor that will be always fed.\n\n **Important**: This tensor will produce an error if evaluated. Its value must\n be fed using the `feed_dict` optional argument to `Session.run()`,\n `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.placeholder(tf.float32, shape=(1024, 1024))\n y = tf.matmul(x, x)\n\n with tf.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n rand_array = np.random.rand(1024, 1024)\n print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n ```\n\n @compatibility{eager} Placeholders are not compatible with eager execution.\n\n Args:\n dtype: The type of elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a tensor of any shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\n Raises:\n RuntimeError: if eager execution is enabled\n \"\"\"\n if context.in_eager_mode():\n raise RuntimeError(\"tf.placeholder() is not compatible with \"\n \"eager execution.\")\n\n return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)\n\n\n# pylint: disable=redefined-outer-name\ndef _normalize_sparse_shape(shape, name):\n \"\"\"Returns a tuple of (Tensor or None, rank or None).\"\"\"\n if shape is None:\n return (None, None)\n rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)\n if not isinstance(shape, ops.Tensor) and None in shape:\n return (None, rank)\n return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)\n\n\n@tf_export(\"sparse_placeholder\")\ndef sparse_placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a sparse tensor that will be always fed.\n\n **Important**: This sparse tensor will produce an error if evaluated.\n Its value must be fed using the `feed_dict` optional argument to\n `Session.run()`, `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.sparse_placeholder(tf.float32)\n y = tf.sparse_reduce_sum(x)\n\n with tf.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)\n values = np.array([1.0, 2.0], dtype=np.float32)\n shape = np.array([7, 9, 2], dtype=np.int64)\n print(sess.run(y, feed_dict={\n x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.\n print(sess.run(y, feed_dict={\n x: (indices, values, shape)})) # Will succeed.\n\n sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)\n sp_value = sp.eval(session=sess)\n print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.\n ```\n\n @compatibility{eager} Placeholders are not compatible with eager execution.\n\n Args:\n dtype: The type of `values` elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). If the shape is not\n specified, you can feed a sparse tensor of any shape.\n name: A name for prefixing the operations (optional).\n\n Returns:\n A `SparseTensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\n Raises:\n RuntimeError: if eager execution is enabled\n \"\"\"\n if context.in_eager_mode():\n raise RuntimeError(\"tf.placeholder() is not compatible with \"\n \"eager execution.\")\n\n shape_name = (name + \"/shape\") if name is not None else None\n shape, rank = _normalize_sparse_shape(shape, shape_name)\n if shape is None:\n shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)\n return sparse_tensor.SparseTensor(\n values=placeholder(\n dtype,\n shape=[None],\n name=(name + \"/values\") if name is not None else None),\n indices=placeholder(\n dtypes.int64, shape=[None, rank],\n name=(name + \"/indices\") if name is not None else None),\n dense_shape=shape)\n\n\n# pylint: enable=redefined-outer-name\n\n\n@tf_export(\"pad\")\ndef pad(tensor, paddings, mode=\"CONSTANT\", name=None, constant_values=0): # pylint: disable=invalid-name\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. 
For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n t = tf.constant([[1, 2, 3], [4, 5, 6]])\n paddings = tf.constant([[1, 1,], [2, 2]])\n # 'constant_values' is 0.\n # rank of 't' is 2.\n tf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\n tf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\n tf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n name: A name for the operation (optional).\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n\n Returns:\n A `Tensor`. Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n\n # Convert lower/mixed case to upper for NumPy compatibility\n # NumPy uses all lower-case modes.\n mode = mode.upper()\n if mode == \"CONSTANT\":\n # TODO(rjryan): Once the forward compatibility period (3 weeks) have passed\n # remove the \"Pad\" fallback here.\n if constant_values != 0:\n result = gen_array_ops._pad_v2(\n tensor, paddings, constant_values, name=name)\n else:\n result = gen_array_ops._pad(tensor, paddings, name=name)\n elif mode == \"REFLECT\":\n result = gen_array_ops._mirror_pad(\n tensor, paddings, mode=\"REFLECT\", name=name)\n elif mode == \"SYMMETRIC\":\n result = gen_array_ops._mirror_pad(\n tensor, paddings, mode=\"SYMMETRIC\", name=name)\n else:\n raise ValueError(\"Unknown padding mode: %s\" % mode)\n\n # Restore shape information where possible.\n if context.in_graph_mode():\n paddings_constant = tensor_util.constant_value(\n result.op.inputs[1], partial=True)\n input_shape = result.op.inputs[0].shape\n if (input_shape.ndims is not None and not result.shape.is_fully_defined()\n and paddings_constant is not None):\n new_shape = []\n for padding, dim in zip(paddings_constant, input_shape.as_list()):\n if padding is None or dim is None or not all(padding):\n new_shape.append(None)\n else:\n new_shape.append(sum(padding) + dim)\n result.set_shape(new_shape)\n\n return result\n\n\n@tf_export(\"meshgrid\")\ndef meshgrid(*args, **kwargs):\n \"\"\"Broadcasts parameters for evaluation on an N-D grid.\n\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n\n Notes:\n\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the default), the broadcasting\n instructions for the first two dimensions are swapped.\n\n Examples:\n\n Calling `X, Y = meshgrid(x, y)` 
with the tensors\n\n ```python\n x = [1, 2, 3]\n y = [4, 5, 6]\n X, Y = tf.meshgrid(x, y)\n # X = [[1, 2, 3],\n # [1, 2, 3],\n # [1, 2, 3]]\n # Y = [[4, 4, 4],\n # [5, 5, 5],\n # [6, 6, 6]]\n ```\n\n Args:\n *args: `Tensor`s with rank 1.\n **kwargs:\n - indexing: Either 'xy' or 'ij' (optional, default: 'xy').\n - name: A name for the operation (optional).\n\n Returns:\n outputs: A list of N `Tensor`s with rank N.\n\n Raises:\n TypeError: When no keyword arguments (kwargs) are passed.\n ValueError: When indexing keyword argument is not one of `xy` or `ij`.\n \"\"\"\n\n indexing = kwargs.pop(\"indexing\", \"xy\")\n name = kwargs.pop(\"name\", \"meshgrid\")\n if kwargs:\n key = list(kwargs.keys())[0]\n raise TypeError(\"'{}' is an invalid keyword argument \"\n \"for this function\".format(key))\n\n if indexing not in (\"xy\", \"ij\"):\n raise ValueError(\"indexing parameter must be either 'xy' or 'ij'\")\n\n with ops.name_scope(name, \"meshgrid\", args) as name:\n ndim = len(args)\n s0 = (1,) * ndim\n\n # Prepare reshape by inserting dimensions with size 1 where needed\n output = []\n for i, x in enumerate(args):\n output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))\n # Create parameters for broadcasting each tensor to the full size\n shapes = [size(x) for x in args]\n\n output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype\n\n if indexing == \"xy\" and ndim > 1:\n output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))\n output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))\n shapes[0], shapes[1] = shapes[1], shapes[0]\n\n # TODO(nolivia): improve performance with a broadcast\n mult_fact = ones(shapes, output_dtype)\n return [x * mult_fact for x in output]\n\n\nNEW_AXIS = -1\nSHRINK_AXIS = -2\n\n\n# PEP-8 naming\n# pylint: disable=invalid-name,redefined-outer-name\ndef _compute_size_of_strided_dim(shrink, spec, size):\n \"\"\"Computes the size of a single strided slice dimension.\"\"\"\n\n unknown = None # Document what None means here.\n use_full_range = None # Document other use of None.\n # if this is a shrink axis (i.e. 
a non-range index)\n # it either will produce an error or return 1\n if shrink:\n return 1\n if size is unknown or size.value is unknown:\n return unknown\n size = size.value\n stride = spec.step\n if stride is not unknown:\n if stride == 0:\n return unknown\n stride = spec.step\n valid_range = [0, size] if stride > 0 else [-1, size - 1]\n\n # PEP-8 naming\n # pylint: disable=invalid-name\n def canonical(x, c):\n if x is use_full_range:\n return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]\n else:\n x_fwd = size + x if x < 0 else x # make negative indices positive\n return max(valid_range[0], min(valid_range[1], x_fwd))\n\n begin = canonical(spec.start, 0)\n end = canonical(spec.stop, 1)\n interval_length = end - begin\n if interval_length == 0 or ((interval_length < 0) != (stride < 0)):\n return 0\n else:\n remainder = 1 if interval_length % stride != 0 else 0\n return interval_length // stride + remainder\n else:\n return unknown # unknown because stride is unknown\n\n\ndef _TileGradShape(op):\n \"\"\"Shape function for the TileGrad op.\"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n # NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)\n # it is a vector of non-negative integers, and (ii) doing so allows\n # us to handle partially-known multiples.\n multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(\n input_shape.ndims)\n if multiples.ndims is None:\n return [tensor_shape.unknown_shape()]\n else:\n output_dims = []\n for dim, multiple in zip(input_shape.dims, multiples.dims):\n output_dims.append(dim // multiple)\n return [tensor_shape.TensorShape(output_dims)]\n\n\n@tf_export(\"edit_distance\")\ndef edit_distance(hypothesis, truth, normalize=True, name=\"edit_distance\"):\n \"\"\"Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by length of `truth` by setting\n `normalize` to true.\n\n For example, given the following input:\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"],\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]],\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n ```\n\n This operation would return the following:\n\n ```python\n # 'output' is a tensor of shape `[2, 2]` with edit distances normalized\n # by 'truth' lengths.\n output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. 
If `True`, normalizes the Levenshtein distance by\n length of `truth.`\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.\n \"\"\"\n if not isinstance(hypothesis, (sparse_tensor.SparseTensor,\n sparse_tensor.SparseTensorValue)):\n raise TypeError(\"Hypothesis must be a SparseTensor.\")\n if not isinstance(truth, (sparse_tensor.SparseTensor,\n sparse_tensor.SparseTensorValue)):\n raise TypeError(\"Truth must be a SparseTensor.\")\n\n return gen_array_ops._edit_distance(\n hypothesis.indices,\n hypothesis.values,\n hypothesis.dense_shape,\n truth.indices,\n truth.values,\n truth.dense_shape,\n normalize=normalize,\n name=name)\n\n\[email protected](\"FakeQuantWithMinMaxArgs\")\ndef _FakeQuantWithMinMaxArgsGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxArgs op.\"\"\"\n return fake_quant_with_min_max_args_gradient(\n grad,\n op.inputs[0],\n min=op.get_attr(\"min\"),\n max=op.get_attr(\"max\"),\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\[email protected](\"FakeQuantWithMinMaxVars\")\ndef _FakeQuantWithMinMaxVarsGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxVars op.\"\"\"\n return fake_quant_with_min_max_vars_gradient(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\[email protected](\"FakeQuantWithMinMaxVarsPerChannel\")\ndef _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):\n \"\"\"Gradient for FakeQuantWithMinMaxVarsPerChannel op.\"\"\"\n return fake_quant_with_min_max_vars_per_channel_gradient(\n grad,\n op.inputs[0],\n op.inputs[1],\n op.inputs[2],\n num_bits=op.get_attr(\"num_bits\"),\n narrow_range=op.get_attr(\"narrow_range\"))\n\n\n@tf_export(\"required_space_to_batch_paddings\")\ndef required_space_to_batch_paddings(input_shape,\n block_shape,\n base_paddings=None,\n name=None):\n \"\"\"Calculate padding required to make block_shape divide input_shape.\n\n This function can be used to calculate a suitable paddings argument for use\n with space_to_batch_nd and batch_to_space_nd.\n\n Args:\n input_shape: int32 Tensor of shape [N].\n block_shape: int32 Tensor of shape [N].\n base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum\n amount of padding to use. All elements must be >= 0. If not specified,\n defaults to 0.\n name: string. 
Optional name prefix.\n\n Returns:\n (paddings, crops), where:\n\n `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]\n satisfying:\n\n paddings[i, 0] = base_paddings[i, 0].\n 0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]\n (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0\n\n crops[i, 0] = 0\n crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]\n\n Raises: ValueError if called with incompatible shapes.\n \"\"\"\n with ops.name_scope(name, \"required_space_to_batch_paddings\",\n [input_shape, block_shape]):\n input_shape = ops.convert_to_tensor(\n input_shape, dtype=dtypes.int32, name=\"input_shape\")\n block_shape = ops.convert_to_tensor(\n block_shape, dtype=dtypes.int32, name=\"block_shape\")\n\n block_shape.get_shape().assert_is_fully_defined()\n block_shape.get_shape().assert_has_rank(1)\n num_block_dims = block_shape.get_shape()[0].value\n if num_block_dims == 0:\n return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)\n\n input_shape.get_shape().assert_is_compatible_with([num_block_dims])\n\n if base_paddings is not None:\n base_paddings = ops.convert_to_tensor(\n base_paddings, dtype=dtypes.int32, name=\"base_paddings\")\n base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])\n else:\n base_paddings = zeros([num_block_dims, 2], dtypes.int32)\n\n const_block_shape = tensor_util.constant_value(block_shape)\n const_input_shape = tensor_util.constant_value(input_shape)\n const_base_paddings = tensor_util.constant_value(base_paddings)\n if (const_block_shape is not None and const_input_shape is not None and\n const_base_paddings is not None):\n block_shape = const_block_shape\n input_shape = const_input_shape\n base_paddings = const_base_paddings\n\n # Use same expression for both constant and non-constant case.\n pad_start = base_paddings[:, 0]\n orig_pad_end = base_paddings[:, 1]\n full_input_shape = input_shape + pad_start + orig_pad_end\n pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape\n pad_end = orig_pad_end + pad_end_extra\n\n result_paddings = stack(\n [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],\n name=\"paddings\")\n result_crops = stack(\n [[0, pad_end_extra[i]] for i in range(num_block_dims)], name=\"crops\")\n return result_paddings, result_crops\n\n\n@tf_export(\"space_to_batch\")\ndef space_to_batch(input, paddings, block_size, name=None): # pylint: disable=redefined-builtin\n result = space_to_batch_nd(\n input,\n paddings=paddings,\n block_shape=np.array([block_size, block_size], dtype=np.int64),\n name=name)\n result.set_shape(result.get_shape().with_rank(4))\n return result\n\n\nspace_to_batch.__doc__ = gen_array_ops._space_to_batch.__doc__\n\n\n@tf_export(\"space_to_depth\")\ndef space_to_depth(input, block_size, name=None, data_format=\"NHWC\"): # pylint: disable=redefined-builtin\n return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)\n\n\nspace_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__\n\n\n@tf_export(\"depth_to_space\")\ndef depth_to_space(input, block_size, name=None, data_format=\"NHWC\"): # pylint: disable=redefined-builtin\n return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)\n\n\ndepth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__\n\n\n@tf_export(\"batch_to_space\")\ndef batch_to_space(input, crops, block_size, name=None): # pylint: disable=redefined-builtin\n result = batch_to_space_nd(\n input,\n crops=crops,\n block_shape=np.array([block_size, block_size], 
dtype=np.int64),\n name=name)\n result.set_shape(result.get_shape().with_rank(4))\n return result\n\n\nbatch_to_space.__doc__ = gen_array_ops._batch_to_space.__doc__\n\n\n@tf_export(\"one_hot\")\ndef one_hot(indices,\n depth,\n on_value=None,\n off_value=None,\n axis=None,\n dtype=None,\n name=None):\n \"\"\"Returns a one-hot tensor.\n\n The locations represented by indices in `indices` take value `on_value`,\n while all other locations take value `off_value`.\n\n `on_value` and `off_value` must have matching data types. If `dtype` is also\n provided, they must be the same data type as specified by `dtype`.\n\n If `on_value` is not provided, it will default to the value `1` with type\n `dtype`\n\n If `off_value` is not provided, it will default to the value `0` with type\n `dtype`\n\n If the input `indices` is rank `N`, the output will have rank `N+1`. The\n new axis is created at dimension `axis` (default: the new axis is appended\n at the end).\n\n If `indices` is a scalar the output shape will be a vector of length `depth`\n\n If `indices` is a vector of length `features`, the output shape will be:\n\n ```\n features x depth if axis == -1\n depth x features if axis == 0\n ```\n\n If `indices` is a matrix (batch) with shape `[batch, features]`, the output\n shape will be:\n\n ```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n ```\n\n If `dtype` is not provided, it will attempt to assume the data type of\n `on_value` or `off_value`, if one or both are passed in. If none of\n `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the\n value `tf.float32`.\n\n Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,\n etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.\n\n For example:\n\n ```python\n indices = [0, 1, 2]\n depth = 3\n tf.one_hot(indices, depth) # output: [3 x 3]\n # [[1., 0., 0.],\n # [0., 1., 0.],\n # [0., 0., 1.]]\n\n indices = [0, 2, -1, 1]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=5.0, off_value=0.0,\n axis=-1) # output: [4 x 3]\n # [[5.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 5.0], # one_hot(2)\n # [0.0, 0.0, 0.0], # one_hot(-1)\n # [0.0, 5.0, 0.0]] # one_hot(1)\n\n indices = [[0, 2], [1, -1]]\n depth = 3\n tf.one_hot(indices, depth,\n on_value=1.0, off_value=0.0,\n axis=-1) # output: [2 x 2 x 3]\n # [[[1.0, 0.0, 0.0], # one_hot(0)\n # [0.0, 0.0, 1.0]], # one_hot(2)\n # [[0.0, 1.0, 0.0], # one_hot(1)\n # [0.0, 0.0, 0.0]]] # one_hot(-1)\n ```\n\n Args:\n indices: A `Tensor` of indices.\n depth: A scalar defining the depth of the one hot dimension.\n on_value: A scalar defining the value to fill in output when `indices[j]\n = i`. (default: 1)\n off_value: A scalar defining the value to fill in output when `indices[j]\n != i`. 
(default: 0)\n axis: The axis to fill (default: -1, a new inner-most axis).\n dtype: The data type of the output tensor.\n name: A name for the operation (optional).\n\n Returns:\n output: The one-hot tensor.\n\n Raises:\n TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`\n TypeError: If dtype of `on_value` and `off_value` don't match one another\n \"\"\"\n with ops.name_scope(name, \"one_hot\",\n [indices, depth, on_value, off_value, axis,\n dtype]) as name:\n on_exists = on_value is not None\n off_exists = off_value is not None\n\n on_dtype = (ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists\n else None)\n off_dtype = (ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists\n else None)\n\n if on_exists or off_exists:\n if dtype is not None:\n # Ensure provided on_value and/or off_value match dtype\n if on_exists and on_dtype != dtype:\n raise TypeError(\"dtype {0} of on_value does not match \"\n \"dtype parameter {1}\".format(on_dtype, dtype))\n if off_exists and off_dtype != dtype:\n raise TypeError(\"dtype {0} of off_value does not match \"\n \"dtype parameter {1}\".format(off_dtype, dtype))\n else:\n # dtype not provided: automatically assign it\n dtype = on_dtype if on_exists else off_dtype\n elif dtype is None:\n # None of on_value, off_value, or dtype provided. Default dtype to float32\n dtype = dtypes.float32\n\n if not on_exists:\n # on_value not provided: assign to value 1 of type dtype\n on_value = ops.convert_to_tensor(1, dtype, name=\"on_value\")\n on_dtype = dtype\n if not off_exists:\n # off_value not provided: assign to value 0 of type dtype\n off_value = ops.convert_to_tensor(0, dtype, name=\"off_value\")\n off_dtype = dtype\n\n if on_dtype != off_dtype:\n raise TypeError(\"dtype {0} of on_value does not match \"\n \"dtype {1} of off_value\".format(on_dtype, off_dtype))\n\n return gen_array_ops._one_hot(indices, depth, on_value, off_value, axis,\n name)\n\n\ndef _all_dimensions(x):\n \"\"\"Returns a 1D-tensor listing all dimensions in x.\"\"\"\n # Fast path: avoid creating Rank and Range ops if ndims is known.\n if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:\n return constant_op.constant(\n np.arange(x.get_shape().ndims), dtype=dtypes.int32)\n if (isinstance(x, sparse_tensor.SparseTensor) and\n x.dense_shape.get_shape().is_fully_defined()):\n r = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.\n return constant_op.constant(np.arange(r), dtype=dtypes.int32)\n\n # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.\n return gen_math_ops._range(0, rank(x), 1)\n\n\n@tf_export(\"sequence_mask\")\ndef sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):\n \"\"\"Returns a mask tensor representing the first N positions of each cell.\n\n If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has\n dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with\n\n ```\n mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])\n ```\n\n Examples:\n\n ```python\n tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],\n # [True, True, True, False, False],\n # [True, True, False, False, False]]\n\n tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],\n # [True, True, True]],\n # [[True, True, False],\n # [False, False, False]]]\n ```\n\n Args:\n lengths: integer tensor, all its values <= maxlen.\n maxlen: scalar integer tensor, size of last dimension of returned tensor.\n Default is the maximum value in `lengths`.\n dtype: 
output type of the resulting tensor.\n name: name of the op.\n Returns:\n A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.\n Raises:\n ValueError: if `maxlen` is not a scalar.\n \"\"\"\n with ops.name_scope(name, \"SequenceMask\", [lengths, maxlen]):\n lengths = ops.convert_to_tensor(lengths)\n\n if maxlen is None:\n maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))\n else:\n maxlen = ops.convert_to_tensor(maxlen)\n if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:\n raise ValueError(\"maxlen must be scalar for sequence_mask\")\n\n # The basic idea is to compare a range row vector of size maxlen:\n # [0, 1, 2, 3, 4]\n # to length as a matrix with 1 column: [[1], [3], [2]].\n # Because of broadcasting on both arguments this comparison results\n # in a matrix of size (len(lengths), maxlen)\n row_vector = gen_math_ops._range(\n constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))\n # Since maxlen >= max(lengths), it is safe to use maxlen as a cast\n # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.\n matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)\n result = row_vector < matrix\n\n if dtype is None or result.dtype.base_dtype == dtype.base_dtype:\n return result\n else:\n return gen_math_ops.cast(result, dtype)\n\n\n@tf_export(\"squeeze\")\ndef squeeze(input, axis=None, name=None, squeeze_dims=None):\n # pylint: disable=redefined-builtin\n \"\"\"Removes dimensions of size 1 from the shape of a tensor.\n\n Given a tensor `input`, this operation returns a tensor of the same type with\n all dimensions of size 1 removed. If you don't want to remove all size 1\n dimensions, you can remove specific size 1 dimensions by specifying\n `axis`.\n\n For example:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t)) # [2, 3]\n ```\n\n Or, to remove specific size 1 dimensions:\n\n ```python\n # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\n tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]\n ```\n\n Args:\n input: A `Tensor`. The `input` to squeeze.\n axis: An optional list of `ints`. Defaults to `[]`.\n If specified, only squeezes the dimensions listed. The dimension\n index starts at 0. It is an error to squeeze a dimension that is not 1.\n Must be in the range `[-rank(input), rank(input))`.\n name: A name for the operation (optional).\n squeeze_dims: Deprecated keyword argument that is now axis.\n\n Returns:\n A `Tensor`. Has the same type as `input`.\n Contains the same data as `input`, but has one or more dimensions of\n size 1 removed.\n\n Raises:\n ValueError: When both `squeeze_dims` and `axis` are specified.\n \"\"\"\n if squeeze_dims is not None:\n if axis is not None:\n raise ValueError(\"Cannot specify both 'squeeze_dims' and 'axis'\")\n axis = squeeze_dims\n if np.isscalar(axis):\n axis = [axis]\n return gen_array_ops._squeeze(input, axis, name)\n\n\n@tf_export(\"where\")\ndef where(condition, x=None, y=None, name=None):\n \"\"\"Return the elements, either from `x` or `y`, depending on the `condition`.\n\n If both `x` and `y` are None, then this operation returns the coordinates of\n true elements of `condition`. The coordinates are returned in a 2-D tensor\n where the first dimension (rows) represents the number of true elements, and\n the second dimension (columns) represents the coordinates of the true\n elements. Keep in mind, the shape of the output tensor can vary depending on\n how many true values there are in input. 
Indices are output in row-major\n order.\n\n If both non-None, `x` and `y` must have the same shape.\n The `condition` tensor must be a scalar if `x` and `y` are scalar.\n If `x` and `y` are vectors of higher rank, then `condition` must be either a\n vector with size matching the first dimension of `x`, or must have the same\n shape as `x`.\n\n The `condition` tensor acts as a mask that chooses, based on the value at each\n element, whether the corresponding element / row in the output should be taken\n from `x` (if true) or `y` (if false).\n\n If `condition` is a vector and `x` and `y` are higher rank matrices, then it\n chooses which row (outer dimension) to copy from `x` and `y`. If `condition`\n has the same shape as `x` and `y`, then it chooses which element to copy from\n `x` and `y`.\n\n Args:\n condition: A `Tensor` of type `bool`\n x: A Tensor which may have the same shape as `condition`. If `condition` is\n rank 1, `x` may have higher rank, but its first dimension must match the\n size of `condition`.\n y: A `tensor` with the same shape and type as `x`.\n name: A name of the operation (optional)\n\n Returns:\n A `Tensor` with the same type and shape as `x`, `y` if they are non-None.\n A `Tensor` with shape `(num_true, dim_size(condition))`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-None.\n \"\"\"\n if x is None and y is None:\n with ops.name_scope(name, \"Where\", [condition]) as name:\n condition = ops.convert_to_tensor(\n condition, preferred_dtype=dtypes.bool, name=\"condition\")\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops._select(condition=condition, x=x, y=y, name=name)\n else:\n raise ValueError(\"x and y must both be non-None or both be None.\")\n\n\n@tf_export(\"reverse\")\ndef reverse(tensor, axis, name=None):\n return gen_array_ops.reverse_v2(tensor, axis, name)\n\n\nreverse.__doc__ = gen_array_ops.reverse_v2.__doc__\n\n\n# pylint: disable=redefined-builtin\n@tf_export(\"reverse_sequence\")\ndef reverse_sequence(input,\n seq_lengths,\n seq_axis=None,\n batch_axis=None,\n name=None,\n seq_dim=None,\n batch_dim=None):\n seq_axis = deprecation.deprecated_argument_lookup(\"seq_axis\", seq_axis,\n \"seq_dim\", seq_dim)\n batch_axis = deprecation.deprecated_argument_lookup(\"batch_axis\", batch_axis,\n \"batch_dim\", batch_dim)\n return gen_array_ops.reverse_sequence(\n input=input,\n seq_lengths=seq_lengths,\n seq_dim=seq_axis,\n batch_dim=batch_axis,\n name=name)\n\n\n# pylint: enable=redefined-builtin\n\nreverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(\n deprecation.rewrite_argument_docstring(\n gen_array_ops.reverse_sequence.__doc__, \"batch_dim\", \"batch_axis\"),\n \"seq_dim\", \"seq_axis\")\n\n\n@tf_export(\"gather\")\ndef gather(params, indices, validate_indices=None, name=None, axis=0):\n # TODO(rjryan): Remove \"Gather\" creation in favor of GatherV2 once the forward\n # compatibility 3 week period has passed.\n if axis == 0:\n return gen_array_ops.gather(\n params, indices, validate_indices=validate_indices, name=name)\n else:\n return gen_array_ops.gather_v2(params, indices, axis, name=name)\n\n\ngather.__doc__ = gen_array_ops.gather_v2.__doc__\n\n\n# Define quantize_v2 here in order to make name the second-to-last attribute,\n# because round_mode was added later.\n@tf_export(\"quantize_v2\")\[email protected](\n \"2017-10-25\",\n \"`tf.quantize_v2` is deprecated, please use `tf.quantize` instead.\")\ndef quantize_v2(input, # pylint: 
disable=redefined-builtin\n min_range,\n max_range,\n T,\n mode=\"MIN_COMBINED\",\n name=None,\n round_mode=\"HALF_AWAY_FROM_ZERO\"):\n return gen_array_ops.quantize_v2(input,\n min_range,\n max_range,\n T=T,\n mode=mode,\n name=name,\n round_mode=round_mode)\n\n\nquantize_v2.__doc__ = \"\"\"Please use `tf.quantize` instead.\"\"\"\n\n\n# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate\n# tf.quantize_v2 in next version of TensorFlow.\n@tf_export(\"quantize\")\ndef quantize(input, # pylint: disable=redefined-builtin\n min_range,\n max_range,\n T,\n mode=\"MIN_COMBINED\",\n round_mode=\"HALF_AWAY_FROM_ZERO\",\n name=None):\n return gen_array_ops.quantize_v2(\n input,\n min_range,\n max_range,\n T,\n mode=mode,\n round_mode=round_mode,\n name=name)\n\n\nquantize.__doc__ = gen_array_ops.quantize_v2.__doc__\n"
] |
[
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.ops.gen_math_ops._range",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.gen_array_ops._mirror_pad",
"tensorflow.python.ops.gen_array_ops._split_v",
"tensorflow.python.ops.gen_array_ops.gather_v2",
"tensorflow.python.ops.gen_array_ops.reverse_sequence",
"tensorflow.python.framework.tensor_util.constant_value_as_shape",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.gen_array_ops._pad",
"tensorflow.python.framework.ops.is_dense_tensor_like",
"tensorflow.python.eager.context.in_eager_mode",
"tensorflow.python.ops.gen_array_ops.quantize_v2",
"tensorflow.python.ops.gen_array_ops._placeholder",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.gen_array_ops.space_to_depth",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.ops.gen_array_ops.where",
"tensorflow.python.ops.gen_array_ops.strided_slice",
"tensorflow.python.ops.gen_array_ops.gather",
"numpy.arange",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.gen_array_ops.rank",
"tensorflow.python.ops.gen_array_ops._zeros_like",
"tensorflow.python.ops.gen_array_ops.identity",
"tensorflow.python.ops.gen_array_ops.depth_to_space",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.common_shapes.broadcast_shape",
"tensorflow.python.ops.gen_array_ops._split",
"tensorflow.python.framework.ops.register_tensor_conversion_function",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.ops.gen_math_ops._select",
"tensorflow.python.ops.gen_array_ops._broadcast_args",
"tensorflow.python.ops.gen_array_ops.reverse_v2",
"tensorflow.python.ops.gen_array_ops._expand_dims",
"tensorflow.python.ops.gen_array_ops.shape_n",
"tensorflow.python.ops.gen_array_ops._concat_v2",
"tensorflow.python.ops.gen_array_ops._slice",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.gen_array_ops._pack",
"tensorflow.python.framework.ops.Tensor._override_operator",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"numpy.array",
"tensorflow.python.ops.gen_array_ops._list_diff",
"tensorflow.python.ops.gen_array_ops._unique",
"tensorflow.python.util.deprecation.rewrite_argument_docstring",
"tensorflow.python.ops.gen_array_ops._squeeze",
"tensorflow.python.eager.context.in_graph_mode",
"tensorflow.python.ops.gen_math_ops.cast",
"tensorflow.python.ops.gen_array_ops._pad_v2",
"tensorflow.python.ops.gen_array_ops._edit_distance",
"tensorflow.python.ops.gen_array_ops._one_hot",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.gen_array_ops.shape",
"tensorflow.python.ops.gen_array_ops._unpack",
"numpy.isscalar",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.gen_array_ops.size",
"tensorflow.python.util.deprecation.deprecated_argument_lookup"
]
] |
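The `array_ops.py` row above implements `tf.sequence_mask` as a broadcast comparison between a `[0, maxlen)` row vector and the lengths reshaped to a column. A minimal NumPy-only sketch of that trick, reproducing the docstring's example output (editorial illustration only, not part of the dataset row):

```python
# Sketch of the broadcast used by sequence_mask above:
# compare a [0..maxlen) row vector against lengths as a column.
import numpy as np

def sequence_mask(lengths, maxlen=None):
    lengths = np.asarray(lengths)
    if maxlen is None:
        maxlen = int(lengths.max())
    row_vector = np.arange(maxlen)          # shape (maxlen,)
    matrix = np.expand_dims(lengths, -1)    # shape (..., 1)
    return row_vector < matrix              # broadcast comparison

print(sequence_mask([1, 3, 2], 5))
# [[ True False False False False]
#  [ True  True  True False False]
#  [ True  True False False False]]
```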
shubham-goel/idr
|
[
"44959e7aac267775e63552d8aac6c2e9f2918cca"
] |
[
"code/training/idr_train.py"
] |
[
"import os\nfrom datetime import datetime\nfrom pyhocon import ConfigFactory\nimport sys\nimport torch\n\nimport utils.general as utils\nimport utils.plots as plt\n\nclass IDRTrainRunner():\n def __init__(self,**kwargs):\n torch.set_default_dtype(torch.float32)\n torch.set_num_threads(1)\n\n self.conf = ConfigFactory.parse_file(kwargs['conf'])\n self.batch_size = kwargs['batch_size']\n self.nepochs = kwargs['nepochs']\n self.exps_folder_name = kwargs['exps_folder_name']\n self.GPU_INDEX = kwargs['gpu_index']\n self.train_cameras = kwargs['train_cameras']\n\n self.expname = self.conf.get_string('train.expname') + kwargs['expname']\n scan_id = kwargs['scan_id'] if kwargs['scan_id'] != -1 else self.conf.get_int('dataset.scan_id', default=-1)\n if scan_id != -1:\n self.expname = self.expname + '_{0}'.format(scan_id)\n\n if kwargs['is_continue'] and kwargs['timestamp'] == 'latest':\n if os.path.exists(os.path.join('../',kwargs['exps_folder_name'],self.expname)):\n timestamps = os.listdir(os.path.join('../',kwargs['exps_folder_name'],self.expname))\n if (len(timestamps)) == 0:\n is_continue = False\n timestamp = None\n else:\n timestamp = sorted(timestamps)[-1]\n is_continue = True\n else:\n is_continue = False\n timestamp = None\n else:\n timestamp = kwargs['timestamp']\n is_continue = kwargs['is_continue']\n\n utils.mkdir_ifnotexists(os.path.join('../',self.exps_folder_name))\n self.expdir = os.path.join('../', self.exps_folder_name, self.expname)\n utils.mkdir_ifnotexists(self.expdir)\n self.timestamp = '{:%Y_%m_%d_%H_%M_%S}'.format(datetime.now())\n utils.mkdir_ifnotexists(os.path.join(self.expdir, self.timestamp))\n\n self.plots_dir = os.path.join(self.expdir, self.timestamp, 'plots')\n utils.mkdir_ifnotexists(self.plots_dir)\n\n # create checkpoints dirs\n self.checkpoints_path = os.path.join(self.expdir, self.timestamp, 'checkpoints')\n utils.mkdir_ifnotexists(self.checkpoints_path)\n self.model_params_subdir = \"ModelParameters\"\n self.optimizer_params_subdir = \"OptimizerParameters\"\n self.scheduler_params_subdir = \"SchedulerParameters\"\n\n utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.model_params_subdir))\n utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.optimizer_params_subdir))\n utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.scheduler_params_subdir))\n\n if self.train_cameras:\n self.optimizer_cam_params_subdir = \"OptimizerCamParameters\"\n self.cam_params_subdir = \"CamParameters\"\n\n utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.optimizer_cam_params_subdir))\n utils.mkdir_ifnotexists(os.path.join(self.checkpoints_path, self.cam_params_subdir))\n\n os.system(\"\"\"cp -r {0} \"{1}\" \"\"\".format(kwargs['conf'], os.path.join(self.expdir, self.timestamp, 'runconf.conf')))\n\n if (not self.GPU_INDEX == 'ignore'):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '{0}'.format(self.GPU_INDEX)\n\n print('shell command : {0}'.format(' '.join(sys.argv)))\n\n print('Loading data ...')\n\n dataset_conf = self.conf.get_config('dataset')\n if kwargs['scan_id'] != -1:\n dataset_conf['scan_id'] = kwargs['scan_id']\n\n self.train_dataset = utils.get_class(self.conf.get_string('train.dataset_class'))(self.train_cameras,\n **dataset_conf)\n\n print('Finish loading data ...')\n\n self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset,\n batch_size=self.batch_size,\n shuffle=True,\n collate_fn=self.train_dataset.collate_fn\n )\n self.plot_dataloader = torch.utils.data.DataLoader(self.train_dataset,\n 
batch_size=self.conf.get_int('plot.plot_nimgs'),\n shuffle=True,\n collate_fn=self.train_dataset.collate_fn\n )\n\n self.model = utils.get_class(self.conf.get_string('train.model_class'))(conf=self.conf.get_config('model'))\n if torch.cuda.is_available():\n self.model.cuda()\n\n self.loss = utils.get_class(self.conf.get_string('train.loss_class'))(**self.conf.get_config('loss'))\n\n self.lr = self.conf.get_float('train.learning_rate')\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n self.sched_milestones = self.conf.get_list('train.sched_milestones', default=[])\n self.sched_factor = self.conf.get_float('train.sched_factor', default=0.0)\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, self.sched_milestones, gamma=self.sched_factor)\n\n # settings for camera optimization\n if self.train_cameras:\n num_images = len(self.train_dataset)\n self.pose_vecs = torch.nn.Embedding(num_images, 7, sparse=True).cuda()\n self.pose_vecs.weight.data.copy_(self.train_dataset.get_pose_init())\n\n self.optimizer_cam = torch.optim.SparseAdam(self.pose_vecs.parameters(), self.conf.get_float('train.learning_rate_cam'))\n\n self.start_epoch = 0\n if is_continue:\n old_checkpnts_dir = os.path.join(self.expdir, timestamp, 'checkpoints')\n\n saved_model_state = torch.load(\n os.path.join(old_checkpnts_dir, 'ModelParameters', str(kwargs['checkpoint']) + \".pth\"))\n self.model.load_state_dict(saved_model_state[\"model_state_dict\"])\n self.start_epoch = saved_model_state['epoch']\n\n data = torch.load(\n os.path.join(old_checkpnts_dir, 'OptimizerParameters', str(kwargs['checkpoint']) + \".pth\"))\n self.optimizer.load_state_dict(data[\"optimizer_state_dict\"])\n\n data = torch.load(\n os.path.join(old_checkpnts_dir, self.scheduler_params_subdir, str(kwargs['checkpoint']) + \".pth\"))\n self.scheduler.load_state_dict(data[\"scheduler_state_dict\"])\n\n if self.train_cameras:\n data = torch.load(\n os.path.join(old_checkpnts_dir, self.optimizer_cam_params_subdir, str(kwargs['checkpoint']) + \".pth\"))\n self.optimizer_cam.load_state_dict(data[\"optimizer_cam_state_dict\"])\n\n data = torch.load(\n os.path.join(old_checkpnts_dir, self.cam_params_subdir, str(kwargs['checkpoint']) + \".pth\"))\n self.pose_vecs.load_state_dict(data[\"pose_vecs_state_dict\"])\n\n self.num_pixels = self.conf.get_int('train.num_pixels')\n self.total_pixels = self.train_dataset.total_pixels\n self.img_res = self.train_dataset.img_res\n self.n_batches = len(self.train_dataloader)\n self.plot_freq = self.conf.get_int('train.plot_freq')\n self.plot_conf = self.conf.get_config('plot')\n\n self.alpha_milestones = self.conf.get_list('train.alpha_milestones', default=[])\n self.alpha_factor = self.conf.get_float('train.alpha_factor', default=0.0)\n for acc in self.alpha_milestones:\n if self.start_epoch > acc:\n self.loss.alpha = self.loss.alpha * self.alpha_factor\n\n def save_checkpoints(self, epoch):\n torch.save(\n {\"epoch\": epoch, \"model_state_dict\": self.model.state_dict()},\n os.path.join(self.checkpoints_path, self.model_params_subdir, str(epoch) + \".pth\"))\n torch.save(\n {\"epoch\": epoch, \"model_state_dict\": self.model.state_dict()},\n os.path.join(self.checkpoints_path, self.model_params_subdir, \"latest.pth\"))\n\n torch.save(\n {\"epoch\": epoch, \"optimizer_state_dict\": self.optimizer.state_dict()},\n os.path.join(self.checkpoints_path, self.optimizer_params_subdir, str(epoch) + \".pth\"))\n torch.save(\n {\"epoch\": epoch, \"optimizer_state_dict\": 
self.optimizer.state_dict()},\n os.path.join(self.checkpoints_path, self.optimizer_params_subdir, \"latest.pth\"))\n\n torch.save(\n {\"epoch\": epoch, \"scheduler_state_dict\": self.scheduler.state_dict()},\n os.path.join(self.checkpoints_path, self.scheduler_params_subdir, str(epoch) + \".pth\"))\n torch.save(\n {\"epoch\": epoch, \"scheduler_state_dict\": self.scheduler.state_dict()},\n os.path.join(self.checkpoints_path, self.scheduler_params_subdir, \"latest.pth\"))\n\n if self.train_cameras:\n torch.save(\n {\"epoch\": epoch, \"optimizer_cam_state_dict\": self.optimizer_cam.state_dict()},\n os.path.join(self.checkpoints_path, self.optimizer_cam_params_subdir, str(epoch) + \".pth\"))\n torch.save(\n {\"epoch\": epoch, \"optimizer_cam_state_dict\": self.optimizer_cam.state_dict()},\n os.path.join(self.checkpoints_path, self.optimizer_cam_params_subdir, \"latest.pth\"))\n\n torch.save(\n {\"epoch\": epoch, \"pose_vecs_state_dict\": self.pose_vecs.state_dict()},\n os.path.join(self.checkpoints_path, self.cam_params_subdir, str(epoch) + \".pth\"))\n torch.save(\n {\"epoch\": epoch, \"pose_vecs_state_dict\": self.pose_vecs.state_dict()},\n os.path.join(self.checkpoints_path, self.cam_params_subdir, \"latest.pth\"))\n\n def run(self):\n print(\"training...\")\n\n for epoch in range(self.start_epoch, self.nepochs + 1):\n\n if epoch in self.alpha_milestones:\n self.loss.alpha = self.loss.alpha * self.alpha_factor\n\n if epoch % 100 == 0:\n self.save_checkpoints(epoch)\n\n if epoch % self.plot_freq == 0:\n self.model.eval()\n if self.train_cameras:\n self.pose_vecs.eval()\n self.train_dataset.change_sampling_idx(-1)\n indices, model_input, ground_truth = next(iter(self.plot_dataloader))\n\n model_input[\"intrinsics\"] = model_input[\"intrinsics\"].cuda()\n model_input[\"uv\"] = model_input[\"uv\"].cuda()\n model_input[\"object_mask\"] = model_input[\"object_mask\"].cuda()\n\n if self.train_cameras:\n pose_input = self.pose_vecs(indices.cuda())\n model_input['pose'] = pose_input\n else:\n model_input['pose'] = model_input['pose'].cuda()\n\n split = utils.split_input(model_input, self.total_pixels)\n res = []\n for s in split:\n out = self.model(s)\n res.append({\n 'points': out['points'].detach(),\n 'rgb_values': out['rgb_values'].detach(),\n 'network_object_mask': out['network_object_mask'].detach(),\n 'object_mask': out['object_mask'].detach()\n })\n\n batch_size = ground_truth['rgb'].shape[0]\n model_outputs = utils.merge_output(res, self.total_pixels, batch_size)\n\n plt.plot(self.model,\n indices,\n model_outputs,\n model_input['pose'],\n ground_truth['rgb'],\n self.plots_dir,\n epoch,\n self.img_res,\n **self.plot_conf\n )\n\n self.model.train()\n if self.train_cameras:\n self.pose_vecs.train()\n\n self.train_dataset.change_sampling_idx(self.num_pixels)\n\n for data_index, (indices, model_input, ground_truth) in enumerate(self.train_dataloader):\n\n model_input[\"intrinsics\"] = model_input[\"intrinsics\"].cuda()\n model_input[\"uv\"] = model_input[\"uv\"].cuda()\n model_input[\"object_mask\"] = model_input[\"object_mask\"].cuda()\n\n if self.train_cameras:\n pose_input = self.pose_vecs(indices.cuda())\n model_input['pose'] = pose_input\n else:\n model_input['pose'] = model_input['pose'].cuda()\n\n model_outputs = self.model(model_input)\n loss_output = self.loss(model_outputs, ground_truth)\n\n loss = loss_output['loss']\n\n self.optimizer.zero_grad()\n if self.train_cameras:\n self.optimizer_cam.zero_grad()\n\n loss.backward()\n\n self.optimizer.step()\n if self.train_cameras:\n 
self.optimizer_cam.step()\n\n print(\n '{0} [{1}] ({2}/{3}): loss = {4}, rgb_loss = {5}, eikonal_loss = {6}, mask_loss = {7}, alpha = {8}, lr = {9}'\n .format(self.expname, epoch, data_index, self.n_batches, loss.item(),\n loss_output['rgb_loss'].item(),\n loss_output['eikonal_loss'].item(),\n loss_output['mask_loss'].item(),\n self.loss.alpha,\n self.scheduler.get_lr()[0]))\n\n self.scheduler.step()\n"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.set_default_dtype",
"torch.utils.data.DataLoader",
"torch.nn.Embedding",
"torch.set_num_threads",
"torch.cuda.is_available"
]
] |
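A hedged sketch of the optimizer/scheduler wiring the `idr_train.py` row uses: a `MultiStepLR` on the model optimizer plus a sparse 7-D per-image pose embedding trained with `SparseAdam`. The stand-in model, learning rates, milestones, and image count below are placeholders, not values from any real IDR config:

```python
import torch

model = torch.nn.Linear(3, 3)  # stand-in for the IDR network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[1000, 1500], gamma=0.5)

num_images = 49
pose_vecs = torch.nn.Embedding(num_images, 7, sparse=True)  # per-image camera pose vector
pose_vecs.weight.data.normal_(0, 0.01)                      # the real script copies a dataset pose init
optimizer_cam = torch.optim.SparseAdam(pose_vecs.parameters(), lr=1e-4)

# one toy step: dense grads go to Adam, sparse embedding grads to SparseAdam
loss = model(torch.randn(2, 3)).sum() + pose_vecs(torch.tensor([0, 1])).sum()
optimizer.zero_grad(); optimizer_cam.zero_grad()
loss.backward()
optimizer.step(); optimizer_cam.step()
scheduler.step()
```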
ChenlingJ/MCycle
|
[
"1fb72be3f889bd5f30c5dbd927f31b3bca2f1c38"
] |
[
"mcycle/defaults.py"
] |
[
"from .logger import log\r\nfrom .constants import *\r\nimport CoolProp as CP\r\n\r\nTOLATTR = 'h'\r\nTOLABS = 1e-7\r\nTOLREL = 1e-7\r\nDIV_T = 5.\r\nDIV_X = 0.1\r\nMAXITER_CYCLE = 50\r\nMAXITER_COMPONENT = 50\r\nMAX_WALLS = 200\r\nTRY_BUILD_PHASE_ENVELOPE = True\r\nGRAVITY = 9.80665\r\nDP_PORT_IN_FACTOR = 1.0\r\nDP_PORT_OUT_FACTOR = 0.4\r\nCOOLPROP_EOS = 'HEOS'\r\nMPL_BACKEND = 'TkAgg'\r\nPLOT_DIR = '.'\r\nPLOT_DPI = 600\r\nPLOT_FORMAT = 'png'\r\nPLOT_COLOR = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']\r\n#PLOT_COLOR = ['0', '0.5', '0.2', '0.7', '0.4', '0.1', '0.8', '0.3'] #grayscale\r\nLINESTYLES = {\r\n 'solid': (0, ()),\r\n 'loosely dotted': (0, (1, 10)),\r\n 'dotted': (0, (1, 5)),\r\n 'densely dotted': (0, (1, 1)),\r\n 'loosely dashed': (0, (5, 10)),\r\n 'dashed': (0, (5, 5)),\r\n 'densely dashed': (0, (5, 1)),\r\n 'loosely dashdotted': (0, (3, 10, 1, 10)),\r\n 'dashdotted': (0, (3, 5, 1, 5)),\r\n 'densely dashdotted': (0, (3, 1, 1, 1)),\r\n 'loosely dashdotdotted': (0, (3, 10, 1, 10, 1, 10)),\r\n 'dashdotdotted': (0, (3, 5, 1, 5, 1, 5)),\r\n 'densely dashdotdotted': (0, (3, 1, 1, 1, 1, 1))\r\n} # https://matplotlib.org/gallery/lines_bars_and_markers/linestyles.html\r\nPLOT_LINESTYLE = [\r\n LINESTYLES[style] for style in [\r\n 'solid', 'densely dashdotted', 'densely dashed', 'densely dotted',\r\n 'densely dashdotdotted', 'dashed'\r\n ]\r\n]\r\nPLOT_MARKER = [''] #['.', 'x', 's', 'v', '^', 'x', 'p', 'D', '']\r\n#\r\nUNITS_SEPARATOR_NUMERATOR = '.'\r\nUNITS_SEPARATOR_DENOMINATOR = '.'\r\nUNITS_FORMAT = 'comma' # '', 'parentheses', 'brackets', 'braces', 'comma', with or without suffix '-nospace'\r\nPRINT_FORMAT_FLOAT = '{: .4e}'\r\nRST_HEADINGS = ['=', '-', '^', '\"']\r\n\r\nCONFIG = None\r\nMETHODS = {\r\n 'GeomHxPlateChevron': {\r\n TRANSFER_HEAT: {\r\n UNITPHASE_ALL: \"chisholmWannairachchi_sp\",\r\n UNITPHASE_TWOPHASE_EVAPORATING: \"yanLin_tpEvap\",\r\n UNITPHASE_TWOPHASE_CONDENSING: \"hanLeeKim_tpCond\"\r\n },\r\n TRANSFER_FRICTION: {\r\n UNITPHASE_ALL: \"chisholmWannairachchi_sp\",\r\n UNITPHASE_TWOPHASE_EVAPORATING: \"yanLin_tpEvap\",\r\n UNITPHASE_TWOPHASE_CONDENSING: \"hanLeeKim_tpCond\"\r\n }\r\n },\r\n 'GeomHxPlateFinStraight': {\r\n TRANSFER_HEAT: {\r\n UNITPHASE_ALL: \"petukhovPopov_sp_h\",\r\n UNITPHASE_ALL_TWOPHASE: \"\"\r\n },\r\n TRANSFER_FRICTION: {\r\n UNITPHASE_ALL: \"bhattiShah_sp_f\",\r\n UNITPHASE_ALL_TWOPHASE: \"\"\r\n }\r\n },\r\n 'GeomHxPlateFinOffset': {\r\n TRANSFER_HEAT: {\r\n UNITPHASE_ALL: \"manglikBergles_offset_sp\",\r\n UNITPHASE_ALL_TWOPHASE: \"\"\r\n },\r\n TRANSFER_FRICTION: {\r\n UNITPHASE_ALL: \"manglikBergles_offset_sp\",\r\n UNITPHASE_ALL_TWOPHASE: \"\"\r\n }\r\n },\r\n 'GeomHxPlateSmooth': {\r\n TRANSFER_HEAT: {\r\n UNITPHASE_ALL: \"shibani_sp_h\",\r\n UNITPHASE_TWOPHASE_EVAPORATING: \"huang_tpEvap_h\",\r\n UNITPHASE_TWOPHASE_CONDENSING: \"\"\r\n },\r\n TRANSFER_FRICTION: {\r\n UNITPHASE_ALL: \"rothfus_sp_f\",\r\n UNITPHASE_ALL_TWOPHASE: \"\"\r\n }\r\n },\r\n 'Geom Name Here': {\r\n TRANSFER_HEAT: {\r\n UNITPHASE_ALL: \"\",\r\n UNITPHASE_VAPOUR: \"\",\r\n UNITPHASE_TWOPHASE_EVAPORATING: \"\",\r\n UNITPHASE_TWOPHASE_CONDENSING: \"\",\r\n WORKING_FLUID: {\r\n UNITPHASE_VAPOUR: \"\",\r\n UNITPHASE_TWOPHASE_CONDENSING: \"\"\r\n },\r\n SECONDARY_FLUID: {\r\n UNITPHASE_VAPOUR: \"\"\r\n }\r\n },\r\n TRANSFER_FRICTION: {\r\n UNITPHASE_ALL: \"\",\r\n WORKING_FLUID: {\r\n UNITPHASE_LIQUID: \"\",\r\n },\r\n SECONDARY_FLUID: {\r\n UNITPHASE_LIQUID: \"\",\r\n }\r\n }\r\n },\r\n}\r\n\r\nDIMENSIONS = {\r\n 'A': {\r\n '': 'length^2'\r\n },\r\n 
'ARatio': {\r\n '': ''\r\n },\r\n 'arrangement': {\r\n '': ''\r\n },\r\n 'b': {\r\n '': 'length'\r\n },\r\n 'beta': {\r\n '': 'angle'\r\n },\r\n 'cp': {\r\n '': 'energy/mass-temperature'\r\n },\r\n 'D': {\r\n '': 'length'\r\n },\r\n 'data': {\r\n '': ''\r\n },\r\n 'deg': {\r\n '': ''\r\n },\r\n 'dp': {\r\n '': 'pressure'\r\n },\r\n 'dpAcc': {\r\n '': 'pressure'\r\n },\r\n 'dpF': {\r\n '': 'pressure'\r\n },\r\n 'dpPort': {\r\n '': 'pressure'\r\n },\r\n 'efficiencyExergy': {\r\n '': ''\r\n },\r\n 'efficiencyIsentropic': {\r\n '': ''\r\n },\r\n 'efficiencyThermal': {\r\n '': ''\r\n },\r\n 'eos': {\r\n '': ''\r\n },\r\n 'fluid': {\r\n '': ''\r\n },\r\n 'h': {\r\n '': 'power/area-temperature',\r\n 'GeomHxPlateFinStraight': 'length',\r\n 'GeomHxPlateFinOffset': 'length',\r\n 'FlowState': 'energy/mass',\r\n 'FlowStatePoly': 'energy/mass'\r\n },\r\n 'I': {\r\n '': 'energy'\r\n },\r\n '_iphase': {\r\n '': ''\r\n },\r\n 'isEvap': {\r\n '': ''\r\n },\r\n 'k': {\r\n '': 'power/length-temperature'\r\n },\r\n 'l': {\r\n '': 'length'\r\n },\r\n 'L': {\r\n '': 'length'\r\n },\r\n 'm': {\r\n '': 'mass/time'\r\n },\r\n 'N': {\r\n '': ''\r\n },\r\n 'name': {\r\n '': ''\r\n },\r\n 'p': {\r\n '': 'pressure'\r\n },\r\n 'passes': {\r\n '': ''\r\n },\r\n 'phi': {\r\n '': ''\r\n },\r\n 'P': {\r\n '': 'power'\r\n },\r\n 'pitchCorr': {\r\n '': 'length'\r\n },\r\n 'Pr': {\r\n '': ''\r\n },\r\n 'pRatio': {\r\n '': ''\r\n },\r\n 'Q': {\r\n '': 'power'\r\n },\r\n 'QCool': {\r\n '': 'power'\r\n },\r\n 'QHeat': {\r\n '': 'power'\r\n },\r\n 'Rf': {\r\n '': 'fouling'\r\n },\r\n 'rho': {\r\n '': 'density'\r\n },\r\n 'roughness': {\r\n '': 'length/length'\r\n },\r\n 's': {\r\n '': 'energy/mass-temperature',\r\n 'GeomHxPlateFinStraight': 'length',\r\n 'GeomHxPlateFinOffset': 'length'\r\n },\r\n 'sense': {\r\n '': ''\r\n },\r\n 'subcool': {\r\n '': 'temperature'\r\n },\r\n 'superheat': {\r\n '': 'temperature'\r\n },\r\n 't': {\r\n '': 'length'\r\n },\r\n 'T': {\r\n '': 'temperature'\r\n },\r\n 'vertical': {\r\n '': ''\r\n },\r\n 'V': {\r\n '': 'length^3/time'\r\n },\r\n 'visc': {\r\n '': 'force-time/area'\r\n },\r\n 'W': {\r\n '': 'length'\r\n },\r\n 'x': {\r\n '': ''\r\n },\r\n}\r\n\r\n\r\ndef setupREFPROP(ALTERNATIVE_REFPROP_PATH='',\r\n ALTERNATIVE_REFPROP_LIBRARY_PATH='',\r\n ALTERNATIVE_REFPROP_HMX_BNC_PATH=''):\r\n \"\"\"Configures CoolProp to find your REFPROP files. Note the FLUIDS folder must be renamed to lowercase ``fluids`` and MIXTURES folder must be renamed to lowercase ``mixtures`` to be found by CoolProp (on Linux, not tested for Windows). See http://www.coolprop.org/coolprop/REFPROP.html#path-issues for more info about each configuration parameter.\"\"\"\r\n CP.CoolProp.set_config_string(CP.ALTERNATIVE_REFPROP_PATH,\r\n ALTERNATIVE_REFPROP_PATH)\r\n log('debug', 'CoolProp.ALTERNATIVE_REFPROP_PATH set to: \"{}\"'.format(\r\n ALTERNATIVE_REFPROP_PATH))\r\n CP.CoolProp.set_config_string(CP.ALTERNATIVE_REFPROP_LIBRARY_PATH,\r\n ALTERNATIVE_REFPROP_LIBRARY_PATH)\r\n log('debug',\r\n 'CoolProp.ALTERNATIVE_REFPROP_LIBRARY_PATH set to: \"{}\"'.format(\r\n ALTERNATIVE_REFPROP_LIBRARY_PATH))\r\n CP.CoolProp.set_config_string(CP.ALTERNATIVE_REFPROP_HMX_BNC_PATH,\r\n ALTERNATIVE_REFPROP_HMX_BNC_PATH)\r\n log('debug',\r\n 'CoolProp.ALTERNATIVE_REFPROP_HMX_BNC_PATH set to: \"{}\"'.format(\r\n ALTERNATIVE_REFPROP_HMX_BNC_PATH))\r\n\r\n\r\ndef makePlotDir(plotDir='default'):\r\n \"\"\"str: Return string of plots directory. 
Creates the directory if it does not yet exist.\"\"\"\r\n import os\r\n cwd = os.getcwd()\r\n if plotDir == \"\":\r\n plotDir = \".\"\r\n if plotDir == 'default':\r\n plotDir = PLOT_DIR\r\n else:\r\n globals()['PLOT_DIR'] = plotDir\r\n if not os.path.exists(plotDir):\r\n os.makedirs(plotDir)\r\n return plotDir\r\n\r\n\r\ndimensionUnits = {\r\n \"\": \"\",\r\n \"angle\": \"deg\",\r\n \"area\": \"m^2\",\r\n \"energy\": \"J\",\r\n \"force\": \"N\",\r\n \"length\": \"m\",\r\n \"mass\": \"kg\",\r\n \"power\": \"W\",\r\n \"pressure\": \"Pa\",\r\n \"temperature\": \"K\",\r\n \"time\": \"s\",\r\n \"volume\": \"m^3\"\r\n}\r\n\r\ndimensionsEquiv = {\r\n \"htc\": \"power/area-temperature\",\r\n \"conductivity\": \"power/length-temperature\",\r\n \"fouling\": \"area-temperature/power\",\r\n \"velocity\": \"length/time\",\r\n \"acceleration\": \"length/time^2\",\r\n \"density\": \"mass/volume\",\r\n}\r\n\r\nattributeSuffixes = [\r\n 'Wf', 'Sf', 'Wall', 'Plate', 'Port', 'Acc', 'Head', 'F', 'Vert', 'In',\r\n 'Out', 'Net', 'Evap', 'Exp', 'Cond', 'Comp'\r\n]\r\n\r\n\r\ndef getDimensions(attribute, className=''):\r\n \"\"\"str : Returns attribute dimensions from DIMENSIONS for a given class\r\n\r\nParameters\r\n-----------\r\nattribute : str\r\n Class attribute name\r\nclassName : str, optional\r\n Class name as string. Defaults to ''.\r\n \"\"\"\r\n if attribute.startswith('coeffs_'):\r\n return ''\r\n for suffix in attributeSuffixes:\r\n if suffix in attribute:\r\n attribute = attribute.split(suffix)[0]\r\n try:\r\n dimension_lookup = DIMENSIONS[attribute]\r\n if className in dimension_lookup:\r\n return dimension_lookup[className]\r\n else:\r\n return dimension_lookup['']\r\n except Exception as exc:\r\n log('debug',\r\n 'defaults.getDimensions: did not find dimensions for \"{}\". Consider raising an issue on Github.'.\r\n format(attribute), exc)\r\n return ''\r\n\r\n\r\ndef _formatUnits(dimensions, separator):\r\n dimList = dimensions.split(\"-\")\r\n units = []\r\n for dim in dimList:\r\n dimSplit = dim.split(\"^\")\r\n if len(dimSplit) == 1:\r\n units.append(dimensionUnits[dimSplit[0]])\r\n else:\r\n units.append(dimensionUnits[dimSplit[0]] + \"^\" + dimSplit[1])\r\n return separator.join(units)\r\n\r\n\r\ndef getUnits(dimension):\r\n \"\"\"str : Returns units for desired dimension (eg. \"length\"), a composite dimension (eg. \"power/length-temperature\") or an equivalent dimension (eg. 
\"density\").\"\"\"\r\n if dimension == \"\":\r\n return dimensionUnits[dimension]\r\n else:\r\n if dimension in dimensionsEquiv:\r\n dimension = dimensionsEquiv[dimension]\r\n dimSplit = dimension.split(\"/\")\r\n assert len(\r\n dimSplit\r\n ) <= 2, \"Dimension may not contain more than one divide symbol '/'\"\r\n output = _formatUnits(dimSplit[0], UNITS_SEPARATOR_NUMERATOR)\r\n if len(dimSplit) == 2:\r\n output += \"/\" + _formatUnits(dimSplit[1],\r\n UNITS_SEPARATOR_DENOMINATOR)\r\n return output\r\n\r\n\r\ndef getUnitsFormatted(dimension):\r\n \"\"\"str : Returns formatted units for desired dimension based on UNITS_FORMAT.\r\nEg, if UNITS_FORMAT=='brackets-nospace': return '[units]', if UNITS_FORMAT=='braces': return ' {units}'.\"\"\"\r\n units = getUnits(dimension)\r\n if units == \"\":\r\n return \"\"\r\n else:\r\n if UNITS_FORMAT == \"brackets\":\r\n units = \" (\" + units + \")\"\r\n elif UNITS_FORMAT == \"parentheses\":\r\n units = \" [\" + units + \"]\"\r\n elif UNITS_FORMAT == \"braces\":\r\n units = \" {\" + units + \"}\"\r\n elif UNITS_FORMAT == \"comma\":\r\n units = \", \" + units\r\n if 'nospace' in UNITS_FORMAT:\r\n units.replace(' ', '')\r\n return units\r\n\r\n\r\ndef check():\r\n \"\"\"Checks all defaults are valid, called when mcycle is imported.\"\"\"\r\n from warnings import warn\r\n import matplotlib\r\n import os\r\n\r\n validPlotFormats = ['png', 'PNG', 'jpg', 'JPG']\r\n assert PLOT_FORMAT in validPlotFormats, \"PLOT_FORMAT must be in {}, '{}' is invalid.\".format(\r\n validPlotFormats, PLOT_FORMAT)\r\n try:\r\n matplotlib.use(MPL_BACKEND)\r\n except Exception as exc:\r\n msg = \"Unable to use {} as Matplotlib backend: remains as {}\".format(\r\n MPL_BACKEND, matplotlib.get_backend())\r\n log('warning', msg, exc)\r\n warn(msg)\r\n assert MAXITER_CYCLE > 0, \"MAXITER_CYCLE must be >0, {} is invalid.\".format(\r\n MAXITER_CYCLE)\r\n assert MAXITER_COMPONENT > 0, \"MAXITER_COMPONENT must be >0, {} is invalid.\".format(\r\n MAXITER_COMPONENT)\r\n assert MAX_WALLS > 1, \"MAX_WALLS must be >1, {} is invalid.\".format(\r\n MAX_WALLS)\r\n unitsepnum = [\".\", \"-\"]\r\n if UNITS_SEPARATOR_NUMERATOR not in unitsepnum:\r\n print(\r\n \"It is recommended to select UNITS_SEPARATOR_NUMERATOR from {}, (given: {})\".\r\n format(unitsepnum, UNITS_SEPARATOR_NUMERATOR))\r\n unitsepdenom = [\".\", \"-\", \"/\"]\r\n if UNITS_SEPARATOR_DENOMINATOR not in unitsepdenom:\r\n print(\r\n \"It is recommended to select UNITS_SEPARATOR_DENOMINATOR from {}, (given: {})\".\r\n format(unitsepdenom, UNITS_SEPARATOR_DENOMINATOR))\r\n\r\n if globals()['COOLPROP_EOS'] == \"REFPROP\":\r\n try:\r\n CP.CoolProp.PropsSI(\"T\", \"P\", 101325, \"Q\", 0, \"REFPROP::Water\")\r\n except Exception as exc:\r\n msg = \"Failed to use REFPROP backend, setting back to 'HEOS'. Check error message in log and consider specifying your REFPROP directory using setupREFPROP()\"\r\n globals()['COOLPROP_EOS'] = \"HEOS\"\r\n log('warning', msg, exc)\r\n warn(msg)\r\n"
] |
[
[
"matplotlib.get_backend",
"matplotlib.use"
]
] |
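A minimal sketch of the backend-fallback pattern `check()` in the `mcycle/defaults.py` row applies: try the configured Matplotlib backend and keep the current one if that fails. `'TkAgg'` mirrors the module's `MPL_BACKEND` default; the warning text is paraphrased:

```python
import matplotlib
from warnings import warn

MPL_BACKEND = 'TkAgg'
try:
    matplotlib.use(MPL_BACKEND)
except Exception as exc:
    # keep whatever backend is already active instead of failing the import
    warn("Unable to use {} as Matplotlib backend, staying on {}: {}".format(
        MPL_BACKEND, matplotlib.get_backend(), exc))
```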
Advitya-sharma/lidtk
|
[
"8122f3ddf801bbb1f254d69fd5d9f815fcdbb28e"
] |
[
"lidtk/features.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"Feature Extraction module.\"\"\"\n\n# core modules\nimport pickle\n\n# 3rd party modules\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndef extract(cfg, text):\n \"\"\"\n Extract features.\n\n Parameters\n ----------\n cfg : dict\n\n Returns\n -------\n features : object\n \"\"\"\n if cfg['features']['type'] == 'raw':\n return text\n elif cfg['features']['type'] == 'tfidf':\n return get_tfidif_features(cfg, [text])[0]\n else:\n raise NotImplemented(\"Feature: {}\".format(cfg['features']['type']))\n\n\ndef get_dim(cfg):\n \"\"\"\n Get the dimension of the extracted features.\n\n Parameters\n ----------\n cfg : dict\n\n Returns\n -------\n feature_dim : int\n \"\"\"\n if cfg['features']['type'] == 'raw':\n raise NotImplemented(\"Feature: {}\".format(cfg['features']['type']))\n elif cfg['features']['type'] == 'tfidf':\n pass # TODO\n else:\n raise NotImplemented(\"Feature: {}\".format(cfg['features']['type']))\n\n\ndef train_tfidf_features(config, data):\n \"\"\"\n Get tf-idf features based on characters.\n\n Parameters\n ----------\n config : dict\n data : dict\n \"\"\"\n if config is None:\n config = {}\n if 'features' not in config:\n config['features'] = {}\n if 'min_df' not in config['features']:\n config['features']['min_df'] = 50\n vectorizer = TfidfVectorizer(analyzer='char',\n min_df=config['features']['min_df'],\n lowercase=config['features']['lowercase'],\n norm=config['features']['norm'])\n xs = {}\n vectorizer.fit(data['x_train'])\n # Serialize trained vectorizer\n with open(config['features']['name'], 'wb') as fin:\n pickle.dump(vectorizer, fin)\n for set_name in ['x_train', 'x_test', 'x_val']:\n xs[set_name] = vectorizer.transform(data[set_name]).toarray()\n return {'vectorizer': vectorizer, 'xs': xs}\n\n\ndef get_tfidif_features(cfg, samples):\n \"\"\"\n Get Tf-idf features for samples.\n\n Parameters\n ----------\n cfg : dict\n samples : ndarray\n\n Returns\n -------\n tfidf_features : ndarray\n \"\"\"\n if 'vectorizer' not in cfg['features']:\n # Load data (deserialize)\n with open(cfg['features']['vectorizer_path'], 'rb') as handle:\n vectorizer = pickle.load(handle)\n cfg['features']['vectorizer'] = vectorizer\n return vectorizer.transform(cfg['features']['vectorizer'])\n"
] |
[
[
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
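A hedged, self-contained sketch of the character-level TF-IDF setup that `train_tfidf_features()` in the `lidtk/features.py` row builds; the `min_df`, `lowercase`, and `norm` values and the toy corpus below are illustrative, not taken from a real lidtk config:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

# character n-gram TF-IDF, as in the row's vectorizer
vectorizer = TfidfVectorizer(analyzer='char', min_df=1, lowercase=True, norm='l2')
x_train = ["guten tag", "good day", "bonjour"]
vectorizer.fit(x_train)

features = vectorizer.transform(["good morning"]).toarray()
print(features.shape)  # (1, number_of_character_features)
```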
hukefei/chongqing_contest
|
[
"c38ae3e6f25230282c65cdd568de93f28e88c6d6"
] |
[
"tools_2/preprocess/augument/test.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n@author: sunchongjing\n@license: (C) Copyright 2019, Union Big Data Co. Ltd. All rights reserved.\n@contact: [email protected]\n@software: \n@file: test.py\n@time: 2019/9/9 18:20\n@desc:\n\"\"\"\n\nimport numpy as np\nimport cv2\nimport random\nfrom preprocess.augument.bbox_util import draw_rect\n\nimg = cv2.imread('/home/scj/mm_detection_proj/stations/boe_b2/trainData/Active_Remain(N)/A090960001BDN1_A_NA_106_4_201906201438_001.jpg')\nbboxes = np.array([[300, 400, 500, 600]], dtype=float)\n\ncv2.imwrite('test.jpg', draw_rect(img, bboxes))\n\nfrom preprocess.augument.data_aug import HorizontalFlip\n\nrotate = HorizontalFlip()\nnew_img, new_bbox = rotate.__call__(img, bboxes)\n#\n\n\n# import matplotlib\n# matplotlib.use('Agg')\n#\n# import matplotlib.pyplot as plt\n# plt.imshow(draw_rect(img, bboxes))\n# plt.show()\n\n\ncv2.imwrite('test_1.jpg', draw_rect(new_img, new_bbox))\n\n# cv2.imwrite('test.jpg', draw_rect(img, bboxes))\n\n# img = draw_rect(img, bboxes)\n# cv2.\n# plt.savefig(img, 'test.jpg')\n# # cv2.imshow(img)\n# # plt.imshow(draw_rect(new_img, new_bbox))\n"
] |
[
[
"numpy.array"
]
] |
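A NumPy-only sketch of what the `HorizontalFlip` augmentation exercised in the `test.py` row does to corner-format boxes, assuming the usual mirror convention x' = W - x; the image width here is made up, and only the box values come from the row:

```python
import numpy as np

img_w = 1024                                             # assumed image width
bboxes = np.array([[300, 400, 500, 600]], dtype=float)   # [x1, y1, x2, y2]

flipped = bboxes.copy()
flipped[:, [0, 2]] = img_w - bboxes[:, [2, 0]]           # mirror x-coords, keep x1 < x2
print(flipped)                                           # [[524. 400. 724. 600.]]
```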
adriangutierrezg/combinato
|
[
"ee6fe8fcc5748da758add38ee842ce942151a4bb"
] |
[
"combinato/guisort/sorter.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"\nthis file contains the code for the spike sorting GUI\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nimport sys\nimport os\nfrom getpass import getuser\nfrom time import strftime\nimport time\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QMainWindow, QApplication, QListView,\n QMessageBox, QFileDialog)\n\nfrom .ui_sorter import Ui_MainWindow\n\nfrom .sort_widgets import AllGroupsFigure, ComparisonFigure,\\\n GroupOverviewFigure\nfrom .raster_figure import RasterFigure\nfrom .backend import Backend\nfrom .load_joblist import PickJobList, GotoJob\nfrom .picksession import PickSessionDialog\nfrom .group_list_model import ClusterDelegate\nfrom .basics import spikeDist\n\nimport numpy as np\n\nfrom .. import options, TYPE_ART, TYPE_MU, TYPE_SU, TYPE_NO\n\nimageSize = 260\nstylesheet = 'QListView:focus { background-color: rgb(240, 255, 255)}'\nDEBUG = options['Debug']\nLOGFILENAME = 'css_gui_log.txt'\n\n\nclass SpikeSorter(QMainWindow, Ui_MainWindow):\n \"\"\"\n main class\n \"\"\"\n def __init__(self, parent=None, arg=None):\n super(SpikeSorter, self).__init__(parent)\n\n self.setupUi(self)\n self.backend = None\n self.groupOverviewFigure = GroupOverviewFigure(self.centralwidget)\n self.allGroupsFigureDirty = True\n\n self.oneGroupLayout.addWidget(self.groupOverviewFigure)\n\n self.allGroupsFigure = AllGroupsFigure(self.centralwidget)\n self.allGroupsFigure.fig.canvas.mpl_connect('button_press_event',\n self.onclick)\n self.allGroupsLayout.addWidget(self.allGroupsFigure)\n self.groupsComparisonFigure = ComparisonFigure(self.centralwidget)\n self.compareFigureLayout.addWidget(self.groupsComparisonFigure)\n view = self.listView\n view.setViewMode(QListView.IconMode)\n view.setResizeMode(QListView.Adjust)\n view.setItemDelegate(ClusterDelegate(self))\n view.setStyleSheet(stylesheet)\n view.addAction(self.actionMakeArtifact)\n view.addAction(self.actionMarkCluster)\n\n self.allGroupsTab.addAction(self.actionAutoassign)\n\n for action in (self.actionMakeArtifact,\n self.actionMarkCluster):\n action.setShortcutContext(Qt.WidgetShortcut)\n\n self.groupComboBox.addAction(self.actionNextGroup)\n self.actionNewGroup.triggered.connect(self.actionNewGroup_triggered)\n self.pushButtonSave.clicked.connect(self.save_one_group)\n self.pushButtonMerge.clicked.connect(self.actionMerge_triggered)\n self.pushButtonTidy.clicked.connect(self.actionTidyGroups_triggered)\n\n self.groupComboBox.currentIndexChanged.\\\n connect(self.updateListView)\n self.tabWidget.setTabEnabled(3, False)\n\n self.tabWidget.currentChanged.\\\n connect(self.updateActiveTab)\n\n self.autoassignPushButton.clicked.\\\n connect(self.on_actionAutoassign_triggered)\n\n self.multiRadioButton.toggled.connect(self.saveTypeMU)\n self.singleRadioButton.toggled.connect(self.saveTypeSU)\n self.artifactRadioButton.toggled.connect(self.saveTypeArti)\n\n self.actionOpen.triggered.connect(self.actionOpen_triggered)\n self.comparePlotpushButton.clicked.connect(self.compare_groups)\n\n self.actionSave.triggered.connect(self.actionSave_triggered)\n self.actionOpenJobs.triggered.connect(self.actionOpenJobs_triggered)\n self.actionNextJob.triggered.connect(self.actionNextJob_triggered)\n self.actionMergeAll.triggered.connect(self.actionMergeAll_triggered)\n self.actionGotoJob.triggered.connect(self.actionGotoJob_triggered)\n self.actionMerge.triggered.connect(self.actionMerge_triggered)\n self.actionMerge_one_unit_groups.triggered.\\\n connect(self.action_MergeOnes_triggered)\n 
self.actionSave_to_Matfile.triggered.connect(self.action_export_triggered)\n\n if len(arg) > 1:\n self.basedir = os.path.dirname(arg)\n else:\n self.basedir = os.getcwd()\n\n self.logfid = open(LOGFILENAME, 'a')\n self.user = getuser()\n\n self.rasterFigure = None\n\n if 'RunGuiWithRaster' in options:\n if options['RunGuiWithRaster']:\n self.tabWidget.setTabEnabled(3, True)\n self.init_raster()\n\n def init_raster(self):\n import pandas as pd\n from .. import raster_options\n\n # the following should read all standard experiment codes\n # e.g. 'fn2' or 'ospr3' (string plus one digit)\n base = os.path.basename(self.basedir)\n try:\n pat = base[:3]\n paradigm = ''\n for char in base[6:]:\n paradigm += char\n if char.isdigit():\n break\n\n except ValueError:\n print('Unable to initialize raster meta data')\n return\n #infix = '{:03d}{}{}'.format(pat, raster_options['infix'], run)\n infix = pat+paradigm\n fname_frame = 'frame_{}.h5'.format(infix)\n frame = pd.read_hdf(fname_frame, raster_options['frame_name'])\n meta_prefix = raster_options['meta_prefix']\n image_path = os.path.join(meta_prefix, infix)\n if paradigm.startswith('fn'):\n image_path = os.path.join(meta_prefix, infix, infix)\n\n # now initialize the data\n self.rasterFigure = RasterFigure(self.centralwidget)\n self.rasterLayout.addWidget(self.rasterFigure)\n self.rasterFigure.set_paradigm_data(frame, image_path)\n self.pushButtonUpdateRasters.setEnabled(True)\n self.lineEditStimSelect.setEnabled(True)\n self.pushButtonUpdateRasters.clicked.connect(self.actionUpdateRasters.trigger)\n self.actionUpdateRasters.triggered.connect(self.update_rasters)\n\n def update_rasters(self):\n if self.backend is None:\n return\n gid = str(self.groupComboBox.currentText())\n group = self.backend.sessions.groupsByName[gid]\n current_paradigm = str(self.lineEditStimSelect.text())\n\n indexes = self.listView.selectedIndexes()\n if indexes:\n index = indexes[0].row()\n else:\n index = -4\n\n tlist = []\n clist = []\n for i, cluster in enumerate(group.clusters):\n if i == index:\n clist.append(cluster.times)\n else:\n tlist.append(cluster.times)\n\n times = []\n for mylist in (tlist, clist):\n if mylist:\n times.append(np.hstack(mylist))\n\n self.rasterFigure.update_figure(times, current_paradigm)\n\n def save_one_group(self):\n \"\"\"\n save a plot of one group\n \"\"\"\n fout = QFileDialog.getSaveFileName(self,\n \"Save as Image\", os.getcwd(),\n \"Image files (*.jpg *.pdf *.png)\")\n self.groupOverviewFigure.save_as_file(str(fout[0]), dpi=300)\n\n def on_actionAutoassign_triggered(self):\n print(self.sender().text())\n\n if self.backend is None:\n return\n elif self.backend.sessions is None:\n return\n\n groupName = str(self.groupComboBox.currentText())\n group = self.backend.sessions.groupsByName[groupName]\n print('Auto-assigning group {}'.format(group))\n\n if group == '':\n return\n\n indices = self.listView.selectedIndexes()\n if len(indices) == 0:\n return\n index = indices[0].row()\n\n selectedMean = group.clusters[index].meanspike\n means = dict()\n\n for name, group in self.backend.sessions.groupsByName.items():\n if name not in ['Unassigned', 'Artifacts']:\n means[name] = np.array(group.meandata).mean(0)\n\n dist = np.inf\n minimizer = None\n\n for name, mean in means.items():\n if name != groupName:\n d = spikeDist(mean, selectedMean)\n if d < dist:\n dist = d\n minimizer = name\n\n print('Moving to ' + minimizer + ', distance {:2f}'.format(dist))\n self.move(self.backend.sessions.groupsByName[minimizer])\n self.updateActiveTab()\n l = 
self.backend.sessions.groupsByName[minimizer].assignAxis.get_lines()\n l[-1].set_color('r')\n self.allGroupsFigure.draw()\n\n def onclick(self, event):\n\n if (event.inaxes is not None) and\\\n (self.backend is not None) and\\\n (self.backend.sessions is not None):\n num = int(event.inaxes.get_label())\n src = self.listView\n dst = self.backend.sessions.groupsById[num]\n self.move(dst, src)\n self.updateActiveTab()\n\n def actionOpen_triggered(self, checked, filename=None):\n if self.backend is not None:\n if self.backend.sessions is not None:\n if self.backend.sessions.dirty:\n self.actionSave.trigger()\n\n del self.backend\n self.backend = None\n\n dialog = PickSessionDialog(self.basedir, self)\n\n if dialog.exec_():\n item = str(dialog.sessionList.selectedItems()[0].text()).split()\n folder = ' '.join(item[0:-2])\n datafile = item[-2]\n sortingfile = item[-1]\n print(folder, datafile, sortingfile)\n item = str(dialog.timesList.selectedItems()[0].text()).split()\n try:\n start_time_ms = int(item[1])/1000\n stop_time_ms = int(item[2])/1000\n except IndexError:\n start_time_ms = 0\n stop_time_ms = np.inf\n\n print('Opening {} {} {} ({} ms to {} ms)'.\n format(folder, datafile, sortingfile,\n start_time_ms, stop_time_ms))\n\n datapath = os.path.join(folder, datafile)\n sessionpath = os.path.join(folder, sortingfile)\n\n self.backend = Backend(datapath, sessionpath,\n start_time_ms, stop_time_ms)\n\n self.status_string = 'Datafile: {} Sorting: {}'.format(datafile,\n sortingfile)\n self.folderLabel.setText(self.status_string)\n else:\n return\n\n self.update_after_open()\n\n def open_job(self, job_to_open):\n \"\"\"\n open a job from the list\n \"\"\"\n if self.backend is not None:\n if self.backend.sessions is not None:\n if self.backend.sessions.dirty:\n self.actionSave.trigger()\n\n del self.backend\n self.backend = None\n\n job = self.job_names[job_to_open]\n\n datapath = os.path.join(self.basedir, job)\n sessionpath = os.path.join(self.basedir, os.path.dirname(job),\n self.job_label)\n\n self.backend = Backend(datapath, sessionpath,\n self.job_start_time_ms, self.job_stop_time_ms)\n self.current_job = job_to_open\n self.status_string = 'Job: {}/{} Datafile: {}\\\n Sorting: {}'.format(self.current_job + 1,\n len(self.job_names),\n job, self.job_label)\n\n self.folderLabel.setText(self.status_string)\n self.update_after_open()\n\n def update_after_open(self):\n self.allGroupsFigureDirty = True\n self.actionNewGroup.setEnabled(True)\n\n sps = self.backend.sorting_manager.\\\n get_samples_per_spike()\n\n t = (self.backend.sessions.start_time,\n self.backend.sessions.stop_time)\n\n thresholds = self.backend.get_thresholds()\n self.groupOverviewFigure.setOptions((0, sps),\n t,\n self.backend.sign,\n thresholds)\n\n self.updateGroupsList()\n self.updateActiveTab()\n\n def actionNextJob_triggered(self):\n \"\"\"\n go to the next job\n \"\"\"\n cj = self.current_job\n if cj + 1 < len(self.job_names):\n self.open_job(cj + 1)\n else:\n print('Last job open')\n return\n\n def actionGotoJob_triggered(self):\n if self.backend is not None:\n if self.backend.sessions is not None:\n if self.backend.sessions.dirty:\n self.actionSave.trigger()\n\n del self.backend\n self.backend = None\n\n dialog = GotoJob(self.job_names, self)\n\n if dialog.exec_():\n item = str(dialog.joblist.selectedItems()[0].text())\n print(item)\n jobid = int(item.split()[0])\n print(jobid)\n self.open_job(jobid)\n\n def actionOpenJobs_triggered(self):\n \"\"\"\n open a job list\n \"\"\"\n if self.backend is not None:\n if 
self.backend.sessions is not None:\n if self.backend.sessions.dirty:\n self.actionSave.trigger()\n\n del self.backend\n self.backend = None\n\n dialog = PickJobList(self.basedir, self)\n\n if dialog.exec_():\n jobfile = str(dialog.jobfileList.selectedItems()[0].text())\n with open(jobfile, 'r') as fid:\n jobs = [line.strip() for line in fid.readlines()]\n fid.close()\n\n label = str(dialog.labelList.selectedItems()[0].text())\n\n item = str(dialog.timesList.selectedItems()[0].text()).split()\n try:\n start_time_ms = int(item[1])/1000\n stop_time_ms = int(item[2])/1000\n except IndexError:\n start_time_ms = 0\n stop_time_ms = np.inf\n\n # store info for later loading\n self.job_names = jobs\n self.job_label = label\n self.job_start_time_ms = start_time_ms\n self.job_stop_time_ms = stop_time_ms\n job_to_open = 0\n\n print('Loaded {} jobs from {} {} ({} ms to {} ms)'.\n format(len(jobs), self.basedir, jobfile,\n start_time_ms, stop_time_ms))\n\n self.open_job(job_to_open)\n\n def actionNewGroup_triggered(self):\n if self.backend.sessions is None:\n return\n\n self.backend.sessions.newGroup()\n oldtext = self.groupComboBox.currentText()\n self.updateGroupsList(oldtext)\n self.allGroupsFigureDirty = True\n self.updateActiveTab()\n\n def actionSetTime_triggered(self, checked):\n if self.backend is None:\n return\n\n dialog = PickTimeDialog(self)\n\n if dialog.exec_():\n item = [str(item.text()) for\n item in dialog.widget.selectedItems()][0]\n start, _, stop, fname = item.split()\n print(start, stop, fname[1:-2])\n start, stop = [int(x)/1000 for x in (start, stop)]\n self.backend.set_sign_start_stop('pos', start, stop)\n\n def actionSelectSession_triggered(self, checked):\n\n if self.backend is None:\n return\n\n if self.backend.sessions is not None:\n if self.backend.sessions.dirty:\n self.actionSave.trigger()\n\n dialog = PickSessionDialog(self)\n\n if dialog.exec_():\n item = [str(item.text()) for\n item in dialog.widget.selectedItems()][0]\n # print('Opening ' + item)\n self.backend.open_sessions(item)\n\n else:\n return\n\n # self.sessionLabel.setText(text)\n\n self.allGroupsFigureDirty = True\n self.actionNewGroup.setEnabled(True)\n x = self.backend.sorting_manager.\\\n get_samples_per_spike()\n # total time in seconds\n t = (self.backend.sessions.start_time,\n self.backend.sessions.stop_time)\n\n # wrong place, should be executed only once!\n self.groupOverviewFigure.setOptions((0, x), t,\n self.backend.sign)\n\n self.updateGroupsList()\n self.updateActiveTab()\n\n def actionMergeAll_triggered(self):\n \"\"\"\n move all clusters to the first group\n \"\"\"\n groups = self.backend.sessions.groupsByName\n names = sorted(groups.keys())\n if len(names) <= 3:\n print('Nothing to move, only groups: {}'.format(names))\n return\n\n target = names[0]\n print('Moving everything to group {}'.format(target))\n\n for name in names[1:]:\n try:\n int(name)\n self.merge_groups(name, target)\n except ValueError:\n print('not moving {}'.format(name))\n\n def actionMerge_triggered(self):\n \"\"\"\n move all clusters from second group to first group\n \"\"\"\n\n current = str(self.tabWidget.currentWidget().objectName())\n if current == 'compareTab':\n tgt = str(self.groupOnecomboBox.currentText())\n src = str(self.groupTwoComboBox.currentText())\n msg = \"Would you like to merge \"\\\n \"group {} into group {}?\".format(src, tgt)\n else:\n return\n\n try:\n int(tgt)\n int(src)\n except ValueError:\n print('Not merging {} and {}!'.format(src, tgt))\n return\n\n if not len(tgt) * len(src):\n return\n\n box = 
QMessageBox(QMessageBox.Question, 'Merging groups', msg,\n buttons=(QMessageBox.Ok | QMessageBox.Cancel))\n\n box.exec_()\n if box.result() == QMessageBox.Ok:\n self.merge_groups(src, tgt)\n\n def action_MergeOnes_triggered(self):\n \"\"\"\n merge all groups with only one member\n \"\"\"\n groups = self.backend.sessions.groupsById\n shorties = []\n\n for gid in groups.keys():\n if (gid > 0) and (len(groups[gid].clusters) == 1):\n shorties.append(gid)\n\n if len(shorties):\n tgt = shorties[0]\n\n for src in shorties[1:]:\n print('Merging {} to {}'.format(src, tgt))\n self.merge_groups(src, tgt, mode='by-id', finalize=False)\n\n self.listView.reset()\n self.updateActiveTab()\n\n\n def merge_groups(self, src, tgt, mode='by-name', finalize=True):\n \"\"\"\n merge two groups\n \"\"\"\n if mode == 'by-name':\n groups = self.backend.sessions.groupsByName\n elif mode == 'by-id':\n groups = self.backend.sessions.groupsById\n else:\n return\n clusters = groups[src].removeClusters()\n groups[tgt].addClusters(clusters)\n self.backend.sessions.dirty = True\n\n if finalize:\n self.listView.reset()\n self.updateActiveTab()\n\n def compare_groups(self):\n group1name = str(self.groupOnecomboBox.currentText())\n group2name = str(self.groupTwoComboBox.currentText())\n\n group1 = self.backend.sessions.groupsByName[group1name]\n group2 = self.backend.sessions.groupsByName[group2name]\n\n self.groupsComparisonFigure.xcorr(group1, group2)\n\n def actionSave_triggered(self):\n msgBox = QMessageBox()\n msgBox.setText(\"Save changes to current session?\")\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n msgBox.setDefaultButton(QMessageBox.Yes)\n ret = msgBox.exec_()\n if ret == QMessageBox.Yes:\n self.backend.sessions.save()\n now = strftime('%Y-%m-%d_%H-%M-%S')\n self.logfid.write('{} {} saved {}\\n'.format(now, self.user,\n self.status_string))\n self.backend.sessions.dirty = False\n\n def on_actionMarkCluster_triggered(self):\n\n name = self.tabWidget.currentWidget().objectName()\n indexes = self.listView.selectedIndexes()\n if len(indexes) == 0:\n return\n index = indexes[0].row()\n\n groupName = str(self.groupComboBox.currentText())\n if groupName == '':\n return\n\n if name == 'oneGroupTab':\n group = self.backend.sessions.groupsByName[groupName]\n clusterdata = np.diff(group.clusters[index].times)\n idx = (clusterdata < options['compute_isi_upto_ms']) & (clusterdata > 0)\n clusterdata = clusterdata[idx]\n self.groupOverviewFigure.mark(index, clusterdata)\n\n elif name == 'allGroupsTab':\n self.allGroupsFigure.mark(groupName, index)\n\n\n def on_actionMakeArtifact_triggered(self):\n self.move(self.backend.sessions.groupsByName['Artifacts'])\n self.updateGroupInfo()\n self.updateActiveTab()\n\n def on_actionNextGroup_triggered(self):\n \"\"\"\n rotate through groups\n \"\"\"\n ngroups = len(self.backend.sessions.groupsByName)\n if self.backend is not None:\n index = self.groupComboBox.currentIndex()\n if index + 1 < ngroups:\n self.groupComboBox.setCurrentIndex(index + 1)\n elif index + 1 == ngroups:\n self.groupComboBox.setCurrentIndex(0)\n\n def updateListView(self, e):\n index = str(self.groupComboBox.currentText())\n if index == '':\n return\n model = self.backend.sessions.groupsByName[index]\n self.listView.setModel(model)\n self.listView.selectionModel().currentChanged.\\\n connect(self.on_actionMarkCluster_triggered)\n self.setRadioButtons(index)\n self.updateActiveTab()\n\n def setRadioButtons(self, index):\n model = self.backend.sessions.groupsByName[index]\n group_type = 
model.group_type\n if group_type == TYPE_MU:\n button = self.multiRadioButton\n elif group_type in (TYPE_ART, TYPE_NO):\n button = self.artifactRadioButton\n elif group_type == TYPE_SU:\n button = self.singleRadioButton\n else:\n raise Warning('Type not defined')\n\n button.setChecked(True)\n\n def save_type(self, new_type):\n index = str(self.groupComboBox.currentText())\n model = self.backend.sessions.groupsByName[index]\n model.group_type = new_type\n self.backend.sessions.dirty = True\n self.allGroupsFigureDirty = True\n self.updateActiveTab()\n\n def saveTypeMU(self, checked):\n \"\"\"\n dispatch\n \"\"\"\n if checked:\n self.save_type(TYPE_MU)\n\n def saveTypeSU(self, checked):\n \"\"\"\n dispatch\n \"\"\"\n if checked:\n self.save_type(TYPE_SU)\n\n def saveTypeArti(self, checked):\n \"\"\"\n dispatch\n \"\"\"\n if checked:\n self.save_type(TYPE_ART)\n\n def move(self, dst, src=None):\n self.backend.sessions.dirty = True\n if src is None:\n src = self.listView\n indexes = src.selectedIndexes()\n\n for obj in (src.model(), dst):\n obj.beginResetModel()\n \n for index in indexes:\n cl = src.model().popCluster(index.row())\n dst.addCluster(cl)\n\n for obj in (src.model(), dst):\n obj.endResetModel()\n\n src.reset()\n\n self.updateGroupInfo()\n\n def updateGroupsList(self, oldtext=None):\n groupsById = self.backend.sessions.groupsById\n box = self.groupComboBox\n box.clear()\n index = 0\n setindex = None\n for group in sorted(groupsById.keys()):\n name = groupsById[group].name\n box.addItem(name)\n if name == oldtext:\n setindex = index\n index += 1\n\n if setindex is not None:\n box.setCurrentIndex(setindex)\n\n box.setEnabled(True)\n\n def updateActiveTab(self):\n\n current = self.tabWidget.currentWidget().objectName()\n\n if current == 'allGroupsTab':\n self.updateAssignPlot()\n\n elif current == 'oneGroupTab':\n self.updateGroupInfo()\n\n elif current == 'compareTab':\n self.updateCompareTab()\n\n def updateCompareTab(self):\n if self.backend is None:\n return\n groupsById = self.backend.sessions.groupsById\n box1 = self.groupOnecomboBox\n box2 = self.groupTwoComboBox\n boxes = (box1, box2)\n for box in boxes:\n box.clear()\n\n for group in sorted(groupsById.keys()):\n for box in boxes:\n name = groupsById[group].name\n box.addItem(name)\n box.setEnabled(True)\n\n def updateGroupInfo(self):\n\n groupName = str(self.groupComboBox.currentText())\n if groupName == '':\n return\n group = self.backend.sessions.groupsByName[groupName]\n self.groupOverviewFigure.updateInfo(group)\n\n def updateAssignPlot(self):\n \"\"\"\n make sure plot with all mean spikes is up-to-date\n \"\"\"\n\n # The speed could still be improved in this function\n if (self.backend is None) or\\\n (self.backend.sessions is None):\n return\n\n session = self.backend.sessions\n\n index = []\n\n for name, group in session.groupsById.items():\n if group.group_type not in [TYPE_ART, TYPE_NO]:\n index.append(name)\n\n index.sort()\n if self.allGroupsFigureDirty:\n self.allGroupsFigure.\\\n addAxes(self.backend.x, session, index)\n self.allGroupsFigureDirty = False\n else:\n self.allGroupsFigure.updateInfo(index)\n\n def actionTidyGroups_triggered(self):\n if self.backend is None:\n return\n if self.backend.sessions is None:\n return\n\n t1 = time.time()\n self.backend.sessions.reorganize_groups()\n print('Reorganization took {:.3f} seconds'.format(time.time() - t1))\n self.allGroupsFigureDirty = True\n self.updateGroupsList()\n self.updateActiveTab()\n\n def action_export_triggered(self):\n if self.backend is None:\n 
return\n if self.backend.sessions is None:\n return\n datafilename, extn = os.path.splitext(self.backend.datafile)\n outfname = os.path.join(self.backend.folder, datafilename + '.mat')\n print('Saving to {}'.format(outfname))\n self.backend.sessions.export_to_matfile(outfname)\n\n\ndef except_hook(cls, exception, traceback):\n sys.__excepthook__(cls, exception, traceback)\n\n\ndef main():\n sys.excepthook = except_hook\n app = QApplication(sys.argv)\n app.setStyle(options['guistyle'])\n win = SpikeSorter(parent=None, arg=sys.argv)\n win.setWindowTitle('Combinato Spike Sorter')\n win.showMaximized()\n app.exec_()\n"
] |
[
[
"numpy.hstack",
"pandas.read_hdf",
"numpy.array",
"numpy.diff"
]
] |
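The row above records a PyQt5 spike-sorting GUI whose listed calls (numpy.hstack, numpy.diff, numpy.array, pandas.read_hdf) mostly appear in its raster and inter-spike-interval handling. Below is a minimal, numpy-only sketch of that pattern, not the GUI itself: the spike times and the 100 ms cutoff are made-up stand-ins (the real code reads the cutoff from options['compute_isi_upto_ms']), and the pandas.read_hdf call that loads the raster frame is omitted.

```python
import numpy as np

# Hypothetical spike-time arrays (ms) for two clusters of one group.
cluster_a = np.array([10.0, 12.5, 30.0, 31.2])
cluster_b = np.array([11.0, 50.0, 52.3])

# Pool per-cluster times the way update_rasters() stacks them.
pooled = np.hstack([cluster_a, cluster_b])

# Inter-spike intervals, as in on_actionMarkCluster_triggered():
# diff the (here sorted) times, keep strictly positive ISIs below a cutoff.
isi = np.diff(np.sort(pooled))
isi = isi[(isi > 0) & (isi < 100.0)]  # 100 ms is an assumed cutoff
print(isi)
```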
askoki/nfl_dpi_prediction
|
[
"dc3256f24ddc0b6725eace2081d1fb1a7e5ce805"
] |
[
"src/visualization/accumulate_grid_search_report.py"
] |
[
"import os\nimport pandas as pd\nfrom settings import Models, FIGURES_DIR, STATISTICS_FILENAME\nfrom src.features.helpers.data_load import get_n_cmd_arg\n\nfolder_result_name = get_n_cmd_arg('Enter results folder!', 1)\nmodels_list = Models.get_models_list()\n\ncumulative_results = pd.DataFrame()\n# Disable pandas SettingWithCopyWarning\npd.options.mode.chained_assignment = None\ncell_options = [8, 64, 128]\nfor model_type in models_list:\n try:\n test_statistics = pd.read_excel(\n os.path.join(FIGURES_DIR, model_type, folder_result_name, STATISTICS_FILENAME),\n sheet_name='Test',\n engine='openpyxl',\n )\n val_statistics = pd.read_excel(\n os.path.join(FIGURES_DIR, model_type, folder_result_name, STATISTICS_FILENAME),\n sheet_name='Validation',\n engine='openpyxl',\n )\n except FileNotFoundError:\n continue\n\n for cell in cell_options:\n cell_result = val_statistics[val_statistics.cells == cell]\n test_cells = test_statistics[test_statistics.cells == cell]\n\n # calculate best on validation set\n max_recall = cell_result.recall.max()\n cell_result = cell_result[cell_result.recall == max_recall]\n # if multiple models have the same recall then pick with highest precision\n if cell_result.shape[0] > 1:\n max_precision = cell_result.precision.max()\n # if recall and precision are the same pick the first one\n cell_result = cell_result[cell_result.precision == max_precision].head(1)\n cell_result['model'] = model_type\n cell_result['cells'] = cell\n\n # add test data\n test_result = test_cells[test_cells.name == cell_result.name.unique()[0]]\n cell_result['test_f1'] = test_result.f1\n cell_result['test_accuracy'] = test_result.accuracy\n cell_result['test_auc'] = test_result.auc\n cell_result['test_precision'] = test_result.precision\n cell_result['test_recall'] = test_result.recall\n cell_result = cell_result[[\n 'model', 'cells', 'f1', 'test_f1',\n 'accuracy', 'test_accuracy', 'auc', 'test_auc', 'precision', 'test_precision',\n 'recall', 'test_recall', 'name']]\n cumulative_results = cumulative_results.append(cell_result, ignore_index=True)\n\ncumulative_results.to_csv(os.path.join(FIGURES_DIR, f'Cumulative_statistics_{folder_result_name}.csv'), index=False)\n"
] |
[
[
"pandas.DataFrame"
]
] |
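The script in the row above picks, per cell size, the validation row with the best recall (breaking ties on precision) and accumulates the winners into one DataFrame. A small sketch of that selection step with invented metric values follows; it uses pandas.concat in place of the DataFrame.append call the script makes, since append was removed in pandas 2.x.

```python
import pandas as pd

# Hypothetical validation metrics for one model and one cell size.
val = pd.DataFrame({
    "name": ["run_a", "run_b", "run_c"],
    "cells": [64, 64, 64],
    "recall": [0.81, 0.84, 0.84],
    "precision": [0.70, 0.66, 0.73],
})

frames = []
cell_result = val[val.cells == 64]

# Keep the row(s) with the best recall; break ties on precision, take the first.
cell_result = cell_result[cell_result.recall == cell_result.recall.max()]
if len(cell_result) > 1:
    cell_result = cell_result[cell_result.precision == cell_result.precision.max()].head(1)

frames.append(cell_result)
cumulative = pd.concat(frames, ignore_index=True)
print(cumulative)
```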
christineton/D3
|
[
"c2c3ecff3d735628e790839fd2d0ae4dadb7b4f8"
] |
[
"03-Ins_Fullstack_Flask_Plotly/demo/app.py"
] |
[
"import pandas as pd\n\nfrom flask import (\n Flask,\n render_template,\n jsonify)\n\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n# The database URI\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///db/emoji.sqlite\"\n\ndb = SQLAlchemy(app)\n\n\n# Create our database model\nclass Emoji(db.Model):\n __tablename__ = 'emoji'\n\n id = db.Column(db.Integer, primary_key=True)\n emoji_char = db.Column(db.String)\n emoji_id = db.Column(db.String)\n name = db.Column(db.String)\n score = db.Column(db.Integer)\n\n def __repr__(self):\n return '<Emoji %r>' % (self.name)\n\n\n# Create database tables\[email protected]_first_request\ndef setup():\n # Recreate database each time for demo\n # db.drop_all()\n db.create_all()\n\n\[email protected](\"/\")\ndef home():\n \"\"\"Render Home Page.\"\"\"\n return render_template(\"index.html\")\n\n\[email protected](\"/emoji_char\")\ndef emoji_char_data():\n \"\"\"Return emoji score and emoji char\"\"\"\n\n # Query for the top 10 emoji data\n results = db.session.query(Emoji.emoji_char, Emoji.score).\\\n order_by(Emoji.score.desc()).\\\n limit(10).all()\n\n # Create lists from the query results\n emoji_char = [result[0] for result in results]\n scores = [int(result[1]) for result in results]\n\n # Generate the plot trace\n trace = {\n \"x\": emoji_char,\n \"y\": scores,\n \"type\": \"bar\"\n }\n return jsonify(trace)\n\n\[email protected](\"/emoji_id\")\ndef emoji_id_data():\n \"\"\"Return emoji score and emoji id\"\"\"\n\n # Query for the emoji data using pandas\n query_statement = db.session.query(Emoji).\\\n order_by(Emoji.score.desc()).\\\n limit(10).statement\n df = pd.read_sql_query(query_statement, db.session.bind)\n\n # Format the data for Plotly\n trace = {\n \"x\": df[\"emoji_id\"].values.tolist(),\n \"y\": df[\"score\"].values.tolist(),\n \"type\": \"bar\"\n }\n return jsonify(trace)\n\n\[email protected](\"/emoji_name\")\ndef emoji_name_data():\n \"\"\"Return emoji score and emoji name\"\"\"\n\n # Query for the top 10 emoji data\n results = db.session.query(Emoji.name, Emoji.score).\\\n order_by(Emoji.score.desc()).\\\n limit(10).all()\n df = pd.DataFrame(results, columns=['name', 'score'])\n\n # Format the data for Plotly\n plot_trace = {\n \"x\": df[\"name\"].values.tolist(),\n \"y\": df[\"score\"].values.tolist(),\n \"type\": \"bar\"\n }\n return jsonify(plot_trace)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
] |
[
[
"pandas.read_sql_query",
"pandas.DataFrame"
]
] |
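The Flask app in the row above queries a SQLite emoji table and reshapes the result into a Plotly bar trace. Here is a stripped-down sketch of the /emoji_id pattern using pandas.read_sql_query against an in-memory sqlite3 database; the table contents are invented and the Flask/SQLAlchemy layer is left out.

```python
import sqlite3
import pandas as pd

# In-memory SQLite database standing in for db/emoji.sqlite.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE emoji (emoji_id TEXT, name TEXT, score INTEGER)")
conn.executemany(
    "INSERT INTO emoji VALUES (?, ?, ?)",
    [("1F602", "joy", 100), ("2764", "heart", 80), ("1F60D", "heart_eyes", 60)],
)

# Same idea as the /emoji_id route: run the top-10 query through pandas ...
df = pd.read_sql_query(
    "SELECT emoji_id, score FROM emoji ORDER BY score DESC LIMIT 10", conn
)

# ... and shape the result into the bar-chart trace the route would jsonify.
trace = {"x": df["emoji_id"].tolist(), "y": df["score"].tolist(), "type": "bar"}
print(trace)
```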
stan-holaysan/Thesis-MaskRCNN
|
[
"ecc4de12473545bdfb744f01224a6df6266ef605"
] |
[
"train2.py"
] |
[
"\"\"\"\nMask R-CNN\nTrain on the toy bottle dataset and implement color splash effect.\nCopyright (c) 2018 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n------------------------------------------------------------\nUsage: import the module (see Jupyter notebooks for examples), or run from\n the command line as such:\n # Train a new model starting from pre-trained COCO weights\n python3 bottle.py train --dataset=/home/datascience/Workspace/maskRcnn/Mask_RCNN-master/samples/bottle/dataset --weights=coco\n # Resume training a model that you had trained earlier\n python3 bottle.py train --dataset=/path/to/bottle/dataset --weights=last\n # Train a new model starting from ImageNet weights\n python3 bottle.py train --dataset=/path/to/bottle/dataset --weights=imagenet\n # Apply color splash to an image\n python3 bottle.py splash --weights=/path/to/weights/file.h5 --image=<URL or path to file>\n # Apply color splash to video using the last weights you trained\n python3 bottle.py splash --weights=last --video=<URL or path to file>\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport datetime\nimport numpy as np\nimport skimage.draw\nimport cv2\nfrom mrcnn.visualize import display_instances\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import model as modellib, utils\n\n# Path to trained weights file\nCOCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n\n# Directory to save logs and model checkpoints, if not provided\n# through the command line argument --logs\nDEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n############################################################\n# Check to see if tensowflow-GPU working\n############################################################\nimport tensorflow as tf\ndevice_name = tf.test.gpu_device_name()\nif device_name != '/device:GPU:0':\n raise SystemError('GPU device not found')\nprint('Found GPU at: {}'.format(device_name))\nprint(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n\n############################################################\n# Configurations\n############################################################\n\n\nclass CustomConfig(Config):\n \"\"\"Configuration for training on the toy dataset.\n Derives from the base Config class and overrides some values.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"object\"\n\n # We use a GPU with 12GB memory, which can fit two images.\n # Adjust down if you use a smaller GPU.\n IMAGES_PER_GPU = 2\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 2 # Background + no. of classes\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 100\n\n # Skip detections with < 90% confidence\n DETECTION_MIN_CONFIDENCE = 0.9\n\n\n############################################################\n# Dataset\n############################################################\n\nclass CustomDataset(utils.Dataset):\n\n def load_custom(self, dataset_dir, subset):\n \"\"\"Load a subset of the bottle dataset.\n dataset_dir: Root directory of the dataset.\n subset: Subset to load: train or val\n \"\"\"\n # Add classes. 
We have only one class to add.\n self.add_class(\"object\", 1, \"Lettuce leaf with less light\")\n self.add_class(\"object\", 2, \"Lettuce leaf with more light\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n\n # Load annotations\n # VGG Image Annotator saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n annotations1 = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n # print(annotations1)\n # annotations = list(annotations1.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations1 if a['regions']]\n \n # Add images\n for a in annotations:\n # print(a)\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. There are stores in the\n # shape_attributes (see json format above)\n polygons = [r['shape_attributes'] for r in a['regions']] \n objects = [s['region_attributes']['name'] for s in a['regions']]\n print(\"objects:\",objects)\n name_dict = {\"Lettuce leaf with less light\": 1,\"Lettuce leaf with more light\": 2}\n # key = tuple(name_dict)\n num_ids = [name_dict[a] for a in objects]\n \n # num_ids = [int(n['Event']) for n in objects]\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. This is only managable since the dataset is tiny.\n print(\"numids\",num_ids)\n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"object\", ## for a single class just add the name here\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons,\n num_ids=num_ids)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for an image.\n Returns:\n masks: A bool array of shape [height, width, instance count] with\n one mask per instance.\n class_ids: a 1D array of class IDs of the instance masks.\n \"\"\"\n # If not a bottle dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"object\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n if info[\"source\"] != \"object\":\n return super(self.__class__, self).load_mask(image_id)\n num_ids = info['num_ids']\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n \trr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n \tmask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n # Map class names to class IDs.\n num_ids = np.array(num_ids, dtype=np.int32)\n return mask, num_ids\n\n def image_reference(self, image_id):\n \"\"\"Return the path of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"object\":\n return info[\"path\"]\n else:\n super(self.__class__, self).image_reference(image_id)\n\n\ndef train(model):\n \"\"\"Train the model.\"\"\"\n # Training dataset.\n dataset_train = CustomDataset()\n dataset_train.load_custom(args.dataset, \"train\")\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = CustomDataset()\n dataset_val.load_custom(args.dataset, \"val\")\n dataset_val.prepare()\n\n # *** This training schedule is an example. Update to your needs ***\n # Since we're using a very small dataset, and starting from\n # COCO trained weights, we don't need to train too long. Also,\n # no need to train all layers, just the heads should do it.\n print(\"Training network heads\")\n model.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=10,\n layers='heads')\n\n\ndef color_splash(image, mask):\n \"\"\"Apply color splash effect.\n image: RGB image [height, width, 3]\n mask: instance segmentation mask [height, width, instance count]\n Returns result image.\n \"\"\"\n # Make a grayscale copy of the image. The grayscale copy still\n # has 3 RGB channels, though.\n gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255\n # We're treating all instances as one, so collapse the mask into one layer\n mask = (np.sum(mask, -1, keepdims=True) >= 1)\n # Copy color pixels from the original color image where mask is set\n if mask.shape[0] > 0:\n splash = np.where(mask, image, gray).astype(np.uint8)\n else:\n splash = gray\n return splash\n\n\ndef detect_and_color_splash(model, image_path=None, video_path=None):\n assert image_path or video_path\n\n # Image or video?\n if image_path:\n # Run model detection and generate the color splash effect\n print(\"Running on {}\".format(args.image))\n # Read image\n image = skimage.io.imread(args.image)\n # Detect objects\n r = model.detect([image], verbose=1)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # Save output\n file_name = \"splash_{:%Y%m%dT%H%M%S}.png\".format(datetime.datetime.now())\n skimage.io.imsave(file_name, splash)\n elif video_path:\n import cv2\n # Video capture\n vcapture = cv2.VideoCapture(video_path)\n width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = vcapture.get(cv2.CAP_PROP_FPS)\n\n # Define codec and create video writer\n file_name = \"splash_{:%Y%m%dT%H%M%S}.avi\".format(datetime.datetime.now())\n vwriter = cv2.VideoWriter(file_name,\n cv2.VideoWriter_fourcc(*'MJPG'),\n fps, (width, height))\n\n count = 0\n success = True\n while success:\n print(\"frame: \", count)\n # Read next image\n success, image = vcapture.read()\n if success:\n # OpenCV returns images as BGR, convert to RGB\n image = image[..., ::-1]\n # Detect objects\n r = model.detect([image], verbose=0)[0]\n # Color splash\n splash = color_splash(image, r['masks'])\n # RGB -> BGR to save image to video\n splash = splash[..., ::-1]\n # Add image to video writer\n vwriter.write(splash)\n count += 1\n vwriter.release()\n print(\"Saved to \", file_name)\n\n############################################################\n# Training\n############################################################\n\nif __name__ == '__main__':\n 
import argparse\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n description='Train Mask R-CNN to detect custom class.')\n parser.add_argument(\"command\",\n metavar=\"<command>\",\n help=\"'train' or 'splash'\")\n parser.add_argument('--dataset', required=False,\n metavar=\"/path/to/custom/dataset/\",\n help='Directory of the custom dataset')\n parser.add_argument('--weights', required=True,\n metavar=\"/path/to/weights.h5\",\n help=\"Path to weights .h5 file or 'coco'\")\n parser.add_argument('--logs', required=False,\n default=DEFAULT_LOGS_DIR,\n metavar=\"/path/to/logs/\",\n help='Logs and checkpoints directory (default=logs/)')\n parser.add_argument('--image', required=False,\n metavar=\"path or URL to image\",\n help='Image to apply the color splash effect on')\n parser.add_argument('--video', required=False,\n metavar=\"path or URL to video\",\n help='Video to apply the color splash effect on')\n args = parser.parse_args()\n\n # Validate arguments\n if args.command == \"train\":\n assert args.dataset, \"Argument --dataset is required for training\"\n elif args.command == \"splash\":\n assert args.image or args.video,\\\n \"Provide --image or --video to apply color splash\"\n\n print(\"Weights: \", args.weights)\n print(\"Dataset: \", args.dataset)\n print(\"Logs: \", args.logs)\n\n # Configurations\n if args.command == \"train\":\n config = CustomConfig()\n else:\n class InferenceConfig(CustomConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n config = InferenceConfig()\n config.display()\n\n # Create model\n if args.command == \"train\":\n model = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=args.logs)\n else:\n model = modellib.MaskRCNN(mode=\"inference\", config=config,\n model_dir=args.logs)\n\n # Select weights file to load\n if args.weights.lower() == \"coco\":\n weights_path = COCO_WEIGHTS_PATH\n # Download weights file\n if not os.path.exists(weights_path):\n utils.download_trained_weights(weights_path)\n elif args.weights.lower() == \"last\":\n # Find last trained weights\n weights_path = model.find_last()[1]\n elif args.weights.lower() == \"imagenet\":\n # Start from ImageNet trained weights\n weights_path = model.get_imagenet_weights()\n else:\n weights_path = args.weights\n\n # Load weights\n print(\"Loading weights \", weights_path)\n if args.weights.lower() == \"coco\":\n # Exclude the last layers because they require a matching\n # number of classes\n model.load_weights(weights_path, by_name=True, exclude=[\n \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n else:\n model.load_weights(weights_path, by_name=True)\n\n # Train or evaluate\n if args.command == \"train\":\n train(model)\n elif args.command == \"splash\":\n detect_and_color_splash(model, image_path=args.image,\n video_path=args.video)\n else:\n print(\"'{}' is not recognized. \"\n \"Use 'train' or 'splash'\".format(args.command))"
] |
[
[
"tensorflow.test.gpu_device_name",
"tensorflow.config.list_physical_devices",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] |
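Most of the numpy calls listed for the Mask R-CNN training script above sit in its color_splash() helper, which collapses per-instance masks into one layer and keeps colour only where a mask is set. A numpy-only sketch of that step follows; the image and masks are random stand-ins, and the TensorFlow GPU check and the Mask R-CNN model itself are omitted.

```python
import numpy as np

# Hypothetical 4x4 RGB image and two instance masks (color_splash-style inputs).
image = np.random.randint(0, 255, size=(4, 4, 3), dtype=np.uint8)
gray = np.repeat(image.mean(axis=-1, keepdims=True), 3, axis=-1).astype(np.uint8)
mask = np.zeros((4, 4, 2), dtype=bool)
mask[1:3, 1:3, 0] = True
mask[0, 0, 1] = True

# Collapse all instances into a single layer, as color_splash() does.
merged = np.sum(mask, -1, keepdims=True) >= 1

# Keep colour where any instance is present, grayscale elsewhere.
splash = np.where(merged, image, gray).astype(np.uint8)
print(splash.shape)
```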
zwc662/On_Manifold_Counterexample
|
[
"0cd2de85083d60d73542dc59f33cbce53798cbf8"
] |
[
"RealNVP/script/learn_by_data.py"
] |
[
"__author__ = \"Xinqiang Ding <[email protected]>\"\n__date__ = \"2019/11/03 21:50:25\"\n\nimport numpy as np\nimport torch\ntorch.set_default_dtype(torch.float64)\nimport torch.optim as optim\nimport pickle\nimport math\nfrom sys import exit\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom RealNVP_2D import *\n\n## Masks used to define the number and the type of affine coupling layers\n## In each mask, 1 means that the variable at the correspoding position is\n## kept fixed in the affine couling layer\nmasks = [[1.0, 0.0],\n [0.0, 1.0],\n [1.0, 0.0], \n [0.0, 1.0],\n [1.0, 0.0], \n [0.0, 1.0],\n [1.0, 0.0],\n [0.0, 1.0]]\n\n## dimenstion of hidden units used in scale and translation transformation\nhidden_dim = 128\n\n## construct the RealNVP_2D object\nrealNVP = RealNVP_2D(masks, hidden_dim)\nif torch.cuda.device_count():\n realNVP = realNVP.cuda()\ndevice = next(realNVP.parameters()).device\n\noptimizer = optim.Adam(realNVP.parameters(), lr = 0.0001)\nnum_steps = 5000\n\n## the following loop learns the RealNVP_2D model by data\n## in each loop, data is dynamically sampled from the scipy moon dataset\nfor idx_step in range(num_steps):\n ## sample data from the scipy moon dataset\n X, label = datasets.make_moons(n_samples = 512, noise = 0.05)\n X = torch.Tensor(X).to(device = device)\n\n ## transform data X to latent space Z\n z, logdet = realNVP.inverse(X)\n\n ## calculate the negative loglikelihood of X\n loss = torch.log(z.new_tensor([2*math.pi])) + torch.mean(torch.sum(0.5*z**2, -1) - logdet)\n \n optimizer.zero_grad()\n loss.backward()\n \n optimizer.step()\n\n if (idx_step + 1) % 100 == 0:\n print(f\"idx_steps: {idx_step:}, loss: {loss.item():.5f}\")\n \n## after learning, we can test if the model can transform\n## the moon data distribution into the normal distribution\nX, label = datasets.make_moons(n_samples = 1000, noise = 0.05)\nX = torch.Tensor(X).to(device = device)\nz, logdet_jacobian = realNVP.inverse(X)\nz = z.cpu().detach().numpy()\n\nX = X.cpu().detach().numpy()\nfig = plt.figure(2, figsize = (12.8, 4.8))\nfig.clf()\nplt.subplot(1,2,1)\nplt.plot(X[label==0,0], X[label==0,1], \".\")\nplt.plot(X[label==1,0], X[label==1,1], \".\")\nplt.title(\"X sampled from Moon dataset\")\nplt.xlabel(r\"$x_1$\")\nplt.ylabel(r\"$x_2$\")\n\nplt.subplot(1,2,2)\nplt.plot(z[label==0,0], z[label==0,1], \".\")\nplt.plot(z[label==1,0], z[label==1,1], \".\")\nplt.title(\"Z transformed from X\")\nplt.xlabel(r\"$z_1$\")\nplt.ylabel(r\"$z_2$\")\nplt.savefig(\"./output/moon_z_transformed_from_x.png\")\n\n## after learning, we can also test if the model can transform\n## the normal distribution into the moon data distribution \nz = torch.normal(0, 1, size = (1000, 2)).to(device = device)\nX, _ = realNVP(z)\nX = X.cpu().detach().numpy()\nz = z.cpu().detach().numpy()\n\nfig = plt.figure(2, figsize = (12.8, 4.8))\nfig.clf()\nplt.subplot(1,2,1)\nplt.plot(z[:,0], z[:,1], \".\")\nplt.title(\"Z sampled from normal distribution\")\nplt.xlabel(r\"$z_1$\")\nplt.ylabel(r\"$z_2$\")\n\nplt.subplot(1,2,2)\nplt.plot(X[:,0], X[:,1], \".\")\nplt.title(\"X transformed from Z\")\nplt.xlabel(r\"$x_1$\")\nplt.ylabel(r\"$x_2$\")\nplt.savefig(\"./output/moon_x_transformed_from_z.png\")\n\n"
] |
[
[
"torch.normal",
"matplotlib.pyplot.title",
"torch.Tensor",
"torch.set_default_dtype",
"sklearn.datasets.make_moons",
"torch.sum",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"torch.cuda.device_count",
"matplotlib.pyplot.figure"
]
] |
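The RealNVP script above trains on dynamically sampled moon data and minimises the negative log-likelihood of the transformed batch. The sketch below isolates only that loss arithmetic: the RealNVP inverse pass is replaced by an identity latent and a zero log-determinant, so this illustrates the formula from the training loop, not the model.

```python
import math
import torch
from sklearn import datasets

torch.set_default_dtype(torch.float64)

# A tiny toy batch from the moons dataset, as the training loop samples.
X, _ = datasets.make_moons(n_samples=8, noise=0.05)
X = torch.Tensor(X)

# Stand-ins for realNVP.inverse(X): pretend z == X and log|det J| == 0.
z, logdet = X, torch.zeros(X.shape[0])

# Negative log-likelihood under a standard normal prior, matching the script:
# log(2*pi) + mean( sum(0.5 * z^2) - log|det J| ).
loss = torch.log(z.new_tensor([2 * math.pi])) + torch.mean(torch.sum(0.5 * z**2, -1) - logdet)
print(loss.item())
```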
oldbridge/covid_euskadi
|
[
"1a8a67711333c6c9d428f8f146999a34acd25a5c"
] |
[
"plot_news.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 30 15:36:48 2021\n\n@author: Xabi\n\"\"\"\n\nfrom functions import read_from_csv_net, read_populations\nimport matplotlib.pyplot as plt\n\n\nif __name__ == '__main__':\n df = read_from_csv_net()\n \n fig, ax = plt.subplots(5, 1, sharex=True)\n \n sizes = read_populations()\n show_city = 'Usurbil'\n \n data = df['global']\n ax[0].plot(data['total_tests'])\n ax[0].plot(data['pcr_positives'])\n ax0_twin = ax[0].twinx() # instantiate a second axes that shares the same x-axis\n ax0_twin.plot(data['positivity_rate'] * 100, 'r')\n ax[0].grid(True)\n ax[0].legend(['Total tests', 'Positive tests'])\n ax0_twin.set_ylabel('Positivity rate (%)')\n ax[0].set_ylabel(\"Number of tests\")\n \n ax[1].plot(data['total_deaths'])\n ax1_twin = ax[1].twinx()\n ax1_twin.plot(data['daily_deaths'], 'r')\n ax1_twin.set_ylabel('Daily deaths')\n ax[1].set_ylabel(\"Total deaths\")\n ax[1].legend([\"Total deaths\"])\n ax[1].grid(True)\n \n ax[2].plot(data['uci_total'])\n ax[2].plot(data.index, [500] * len(data.index), 'r--')\n ax[2].legend(['ICU occupation', 'Available ICU'])\n ax[2].set_ylabel(\"ICU beds\")\n ax[2].grid(True)\n \n ax[3].plot(data['new_intake'])\n ax[3].plot(data['hosp_release_daily'])\n ax[3].legend(['New intake', 'Releases'])\n ax[3].set_ylabel(\"Patients\")\n ax[3].grid(True)\n \n city_data = df['municipios'][show_city]\n city_size = sizes[sizes.index == show_city]['inhabitants'][0]\n ax[4].plot(city_data)\n inci_abs = city_data.rolling(14).sum()\n inci_100k = inci_abs * 100000 / city_size\n ax4_twin = ax[4].twinx()\n ax4_twin.plot(inci_100k, 'r')\n ax4_twin.set_ylabel('14 day incidence per 100k inhabitants')\n ax[4].legend(['Daily positives'])\n ax[4].set_ylabel(\"Patients\")\n ax[4].grid(True)"
] |
[
[
"matplotlib.pyplot.subplots"
]
] |
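The plotting script above builds a stack of shared-x panels with matplotlib.pyplot.subplots and puts a second quantity on a twin y-axis in several of them. A short sketch of that twin-axis pattern and of the 14-day incidence calculation follows; the daily figures are invented, the rolling window is shortened to 2 days to fit the toy data, and the population of 6000 is hypothetical.

```python
import matplotlib.pyplot as plt
import pandas as pd

# Made-up daily figures standing in for the 'global' frame the script reads.
data = pd.DataFrame(
    {"total_tests": [1000, 1200, 900], "pcr_positives": [50, 80, 40]},
    index=pd.date_range("2021-01-01", periods=3),
)
data["positivity_rate"] = data["pcr_positives"] / data["total_tests"]

fig, ax = plt.subplots(2, 1, sharex=True)

# Tests on the left axis, positivity rate (%) on a twin right axis,
# mirroring the first panel of plot_news.py.
ax[0].plot(data["total_tests"])
ax[0].plot(data["pcr_positives"])
ax0_twin = ax[0].twinx()
ax0_twin.plot(data["positivity_rate"] * 100, "r")
ax[0].legend(["Total tests", "Positive tests"])
ax0_twin.set_ylabel("Positivity rate (%)")

# Rolling incidence per 100k inhabitants, as in the municipality panel.
city = pd.Series([3, 5, 2], index=data.index)
inci_100k = city.rolling(2).sum() * 100000 / 6000  # 6000 = hypothetical population
ax[1].plot(inci_100k, "r")
ax[1].set_ylabel("Incidence per 100k")
```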
Programmer-Sidharth/FridayChatBot
|
[
"7bef7561f61333bfa5e048017a89e04100ae1776"
] |
[
"FridayChatBot/tasks.py"
] |
[
"from .functionalities import *\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nimport random\nimport numpy as np\n\nclass Tasks:\n # or make your own greetings\n greetings = np.array(['hi', 'hello', \"hey there\", \"how are you\", \"what are you doing\", \"how is it going\"])\n name = np.array(['who are you', 'who', 'your name', \"call you\", 'what', 'are you'])\n place = np.array(['where', \"you live\", \"place\", \"your\", \"house\", \"home\", \"family\"])\n feedbacks = np.array(['good', \"bad\", \"amazing\", \"fantastic\", \"wonderful\", \"worst\", 'better', \"beautiful\", 'shit', 'antique'])\n contact = np.array(['contact', 'email', 'your', 'phone', 'mobile'])\n bye = np.array(['bye', 'see you later', 'exit'])\n\n def __init__(self, bot):\n self.bot = bot\n\n def task_type(self, question):\n possible_tasks = {}\n array = np.array([{\"call\":\"greeting\", \"array\":Tasks.greetings}, {\"call\":\"name\", \"array\":Tasks.name}, {\"call\":\"places\", \"array\":Tasks.place}, {\"call\":\"contact\", \"array\":Tasks.contact}, {\"call\":\"bye\", \"array\":Tasks.bye}])\n\n def posibillities(array):\n for i in array:\n for word in i['array']:\n if word.lower() in question.lower():\n if i['call'] in possible_tasks:\n possible_tasks[i['call']] = int(possible_tasks[i['call']]) + 1\n else:\n possible_tasks[i['call']] = 1\n\n posibillities(array)\n return possible_tasks\n\n def do_tasks(self, task_types, question):\n data = []\n for task in task_types:\n if task == \"greeting\":\n data.append(greetings(self.bot))\n if task == \"name\":\n data.append(MyName(self.bot))\n elif task == \"places\":\n data.append(MyPlace(self.bot))\n elif task == \"contact\":\n data.append(contact_methods(self.bot))\n elif task == \"bye\":\n print(bye(self.bot))\n exit()\n if type(data) != str:\n return '. '.join(data)\n\n def work(self, question):\n data = []\n task_types = Tasks.task_type(self, question)\n tasks_data = Tasks.do_tasks(self, task_types, question)\n if type(tasks_data) != str:\n data.extend(tasks_data)\n else:\n data.append(tasks_data)\n if type(data) != str:\n joined_data = '. '.join(data)\n else:\n joined_data = data\n return {'response':joined_data, 'types':task_types}"
] |
[
[
"numpy.array"
]
] |
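The chatbot row above classifies a question by counting keyword hits against numpy arrays of phrases. Below is a compact sketch of that dispatch logic with shortened, made-up keyword lists; only the counting step of Tasks.task_type() is reproduced, not the response generation.

```python
import numpy as np

# Keyword groups like the Tasks class attributes (shortened, invented lists).
greetings = np.array(["hi", "hello", "how are you"])
bye = np.array(["bye", "see you later", "exit"])
groups = [{"call": "greeting", "array": greetings}, {"call": "bye", "array": bye}]

def task_type(question):
    """Count keyword hits per task group, as Tasks.task_type() does."""
    possible = {}
    for group in groups:
        for word in group["array"]:
            if word.lower() in question.lower():
                possible[group["call"]] = possible.get(group["call"], 0) + 1
    return possible

print(task_type("Hi there, how are you?"))  # {'greeting': 2}
```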
mathiasaap/SegCaps-1
|
[
"fe7fdd226d6c85ddca2e04f795311988a42bf985"
] |
[
"load_data_multiclass.py"
] |
[
"'''\nCapsules for Object Segmentation (SegCaps)\nOriginal Paper by Rodney LaLonde and Ulas Bagci (https://arxiv.org/abs/1804.04241)\nCode written by: Rodney LaLonde\nIf you use significant portions of this code or the ideas from our paper, please cite it :)\nIf you have any questions, please email me at [email protected].\n\nThis file is used for loading training, validation, and testing data into the models.\nIt is specifically designed to handle 3D single-channel medical data.\nModifications will be needed to train/test on normal 3-channel images.\n'''\n\nfrom __future__ import print_function\n\nimport threading\nfrom os.path import join, basename\nfrom os import mkdir\nfrom glob import glob\nimport csv\nfrom sklearn.model_selection import KFold\nimport numpy as np\nfrom numpy.random import rand, shuffle\nimport SimpleITK as sitk\nfrom sklearn.model_selection import train_test_split\nfrom tqdm import tqdm\nimport random\nimport time\nfrom load_heart import convert_heart_data_to_numpy\nfrom load_spleen import convert_spleen_data_to_numpy\nfrom load_brats import convert_brats_data_to_numpy\nfrom load_hepatic import convert_hepatic_data_to_numpy\nfrom load_colon import convert_colon_data_to_numpy\nfrom load_pancreas import convert_pancreas_data_to_numpy\nfrom postprocess import oneHot2LabelMax\nfrom augmentation import augment_random, elasticDeform2D, elasticDeform3D\n\nfrom scipy import linalg\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nplt.ioff()\n\nfrom keras.preprocessing.image import *\n\nfrom custom_data_aug import elastic_transform, salt_pepper_noise\n\ndebug = 0\n\ndef load_data(root, split):\n # Load the training and testing lists\n with open(join(root, 'split_lists', 'train_split_' + str(split) + '.csv'), 'r') as f:\n reader = csv.reader(f)\n training_list = list(reader)\n\n with open(join(root, 'split_lists', 'test_split_' + str(split) + '.csv'), 'r') as f:\n reader = csv.reader(f)\n testing_list = list(reader)\n\n new_training_list, validation_list = train_test_split(training_list, test_size = 0.1, random_state = 7)\n if new_training_list == []: # if training_list only have 1 image file.\n new_training_list = validation_list\n return new_training_list, validation_list, testing_list\n\ndef compute_class_weights(root, train_data_list):\n '''\n We want to weight the the positive pixels by the ratio of negative to positive.\n Three scenarios:\n 1. Equal classes. neg/pos ~ 1. Standard binary cross-entropy\n 2. Many more negative examples. The network will learn to always output negative. In this way we want to\n increase the punishment for getting a positive wrong that way it will want to put positive more\n 3. Many more positive examples. We weight the positive value less so that negatives have a chance.\n '''\n pos = 0.0\n neg = 0.0\n for img_name in tqdm(train_data_list):\n img = sitk.GetArrayFromImage(sitk.ReadImage(join(root, 'masks', img_name[0])))\n for slic in img:\n if not np.any(slic):\n continue\n else:\n p = np.count_nonzero(slic)\n pos += p\n neg += (slic.size - p)\n\n return neg/pos\n\ndef compute_multiclass_weights(root, train_data_list, num_classes):\n '''\n We want to weight the the positive pixels by the ratio of negative to positive.\n Three scenarios:\n 1. Equal classes. neg/pos ~ 1. Standard binary cross-entropy\n 2. Many more negative examples. The network will learn to always output negative. In this way we want to\n increase the punishment for getting a positive wrong that way it will want to put positive more\n 3. 
Many more positive examples. We weight the positive value less so that negatives have a chance.\n '''\n pos = np.array([0.0 for _ in range(num_classes)])\n\n for img_name in tqdm(train_data_list):\n img = sitk.GetArrayFromImage(sitk.ReadImage(join(root, 'masks', img_name[0])))\n for slic in img:\n if not np.any(slic):\n continue\n else:\n for i in range(num_classes):\n p = np.count_nonzero(slic==i)\n pos[i] += p\n \n weights = 1 - (pos / np.sum(pos))\n \n return weights\n\ndef load_class_weights(root, split):\n class_weight_filename = join(root, 'split_lists', 'train_split_' + str(split) + '_class_weights.npy')\n try:\n return np.load(class_weight_filename)\n except:\n print('\\nClass weight file {} not found.\\nComputing class weights now. This may take '\n 'some time.'.format(class_weight_filename))\n train_data_list, _, _ = load_data(root, str(split))\n value = compute_class_weights(root, train_data_list)\n np.save(class_weight_filename,value)\n print('\\nFinished computing class weights. This value has been saved for this training split.')\n return value\n \ndef load_multiclass_weights(root, split, num_classes):\n class_weight_filename = join(root, 'split_lists', 'train_split_' + str(split) + '_class_weights.npy')\n try:\n return np.load(class_weight_filename)\n except:\n print('\\nClass weight file {} not found.\\nComputing class weights now. This may take '\n 'some time.'.format(class_weight_filename))\n train_data_list, _, _ = load_data(root, str(split))\n value = compute_multiclass_weights(root, train_data_list, num_classes)\n np.save(class_weight_filename,value)\n print('\\nFinished computing class weights. This value has been saved for this training split.')\n return value\n\n\ndef split_data(root_path, num_splits):\n mask_list = []\n for ext in ('*.mhd', '*.hdr', '*.nii', '*.png', '*.nii.gz'): #add png file support\n mask_list.extend(sorted(glob(join(root_path,'masks',ext)))) # check imgs instead of masks\n\n assert len(mask_list) != 0, 'Unable to find any files in {}'.format(join(root_path,'masks'))\n print(mask_list)\n outdir = join(root_path,'split_lists')\n try:\n makedirs(outdir)\n print(\"Made directory\")\n except:\n pass\n print(\"Could not make dir {}\".format(outdir))\n\n if num_splits == 1:\n # Testing model, training set = testing set = 1 image\n train_index = test_index = mask_list\n with open(join(outdir,'train_split_' + str(0) + '.csv'), 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n print('basename=%s'%([basename(mask_list[0])]))\n writer.writerow([basename(mask_list[0])])\n with open(join(outdir,'test_split_' + str(0) + '.csv'), 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow([basename(mask_list[0])])\n\n else:\n kf = KFold(n_splits=num_splits)\n n = 0\n for train_index, test_index in kf.split(mask_list):\n with open(join(outdir,'train_split_' + str(n) + '.csv'), 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for i in train_index:\n writer.writerow([basename(mask_list[i])])\n with open(join(outdir,'test_split_' + str(n) + '.csv'), 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for i in test_index:\n writer.writerow([basename(mask_list[i])])\n n += 1\n\n\n\n''' Make the generators threadsafe in case of multiple threads '''\nclass threadsafe_iter:\n \"\"\"Takes an iterator/generator and makes it thread-safe by\n 
serializing call to the `next` method of given iterator/generator.\n \"\"\"\n def __init__(self, it):\n self.it = it\n self.lock = threading.Lock()\n\n def __iter__(self):\n return self\n\n def next(self):\n with self.lock:\n return self.it.next()\n\n\ndef threadsafe_generator(f):\n \"\"\"A decorator that takes a generator function and makes it thread-safe.\n \"\"\"\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g\n\ndef get_np_converter(dataset):\n if dataset == 'heart':\n return convert_heart_data_to_numpy\n elif dataset == 'hepatic':\n return convert_hepatic_data_to_numpy\n elif dataset == 'colon':\n return convert_colon_data_to_numpy\n elif dataset == 'pancreas':\n return convert_pancreas_data_to_numpy\n else:\n return convert_spleen_data_to_numpy\n \none_hot_max = 1.0 # Value of positive class in one hot\n\n\n@threadsafe_generator\n@threadsafe_generator\ndef generate_train_batches(root_path, train_list, net_input_shape, net, batchSize=1, numSlices=1, subSampAmt=-1,\n stride=1, downSampAmt=1, shuff=1, aug_data=1, dataset = 'brats', num_output_classes=2):\n print('train ' + str(dataset))\n modalities = net_input_shape[2] // numSlices\n input_slices = numSlices\n img_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.float32)\n mask_shape = [net_input_shape[0], net_input_shape[1], num_output_classes]\n mask_batch = np.zeros((np.concatenate(((batchSize,), mask_shape))), dtype=np.float32)\n\n if dataset == 'brats':\n np_converter = convert_brats_data_to_numpy\n frame_pixels_0 = 8\n frame_pixels_1 = -8\n empty_mask = np.array([one_hot_max, 1-one_hot_max, 1-one_hot_max, 1-one_hot_max])\n raw_x_shape = 240\n raw_y_shape = 240\n elif dataset in ['heart', 'spleen', 'colon', 'hepatic', 'pancreas']:\n np_converter = get_np_converter(dataset)\n frame_pixels_0 = 0\n frame_pixels_1 = net_input_shape[0]\n if num_output_classes == 2:\n empty_mask = np.array([one_hot_max, 1-one_hot_max])\n else:\n empty_mask = np.array([1-one_hot_max])\n raw_x_shape = net_input_shape[0]\n raw_y_shape = net_input_shape[1]\n else:\n assert False, 'Dataset not recognized'\n\n is_binary_classification = num_output_classes == 1\n while True:\n if shuff:\n shuffle(train_list)\n count = 0\n for i, scan_name in enumerate(train_list):\n try:\n scan_name = scan_name[0]\n path_to_np = join(root_path,'np_files',basename(scan_name)[:-6]+'npz')\n #print('\\npath_to_np=%s'%(path_to_np))\n with np.load(path_to_np) as data:\n train_img = data['img']\n train_mask = data['mask']\n except:\n #print('\\nPre-made numpy array not found for {}.\\nCreating now...'.format(scan_name[:-7]))\n train_img, train_mask = np_converter(root_path, scan_name, num_classes=num_output_classes)\n if np.array_equal(train_img,np.zeros(1)):\n continue\n else:\n print('\\nFinished making npz file.')\n #print(\"Train mask shape {}\".format(train_mask.shape))\n\n if numSlices == 1:\n sideSlices = 0\n else:\n if numSlices % 2 != 0:\n numSlices -= 1\n sideSlices = numSlices / 2\n\n z_shape = train_img.shape[2]\n indicies = np.arange(0, z_shape, stride)\n\n if shuff:\n shuffle(indicies)\n for j in indicies:\n\n if (is_binary_classification and np.sum(train_mask[:, :, j]) < 1) or (not is_binary_classification and np.sum(train_mask[:, :, j, 1:]) < 1):\n #print('hola')\n continue\n if img_batch.ndim == 4:\n img_batch[count] = 0\n next_img = train_img[:, :, max(j-sideSlices,0):min(j+sideSlices+1,z_shape)].reshape(raw_x_shape, raw_y_shape, -1)\n insertion_index = -modalities\n img_index = 0\n for k in range(j-sideSlices, 
j+sideSlices+1):\n insertion_index += modalities\n if (k < 0): continue\n if (k >= z_shape): break\n img_batch[count, frame_pixels_0:frame_pixels_1, frame_pixels_0:frame_pixels_1, insertion_index:insertion_index+modalities] = next_img[:, :, img_index:img_index+modalities]\n img_index += modalities\n mask_batch[count] = empty_mask\n mask_batch[count, frame_pixels_0:frame_pixels_1, frame_pixels_0:frame_pixels_1, :] = train_mask[:, :, j]\n else:\n print('\\nError this function currently only supports 2D and 3D data.')\n exit(0)\n if aug_data:\n img_batch[count], mask_batch[count] = augment_random(img_batch[count], mask_batch[count])\n count += 1\n if count % batchSize == 0:\n count = 0\n if debug:\n if img_batch.ndim == 4:\n plt.imshow(np.squeeze(img_batch[0, :, :, 0]), cmap='gray')\n plt.savefig(join(root_path, 'logs', 'ex{}_train_slice1.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(img_batch[0, :, :, 4]), cmap='gray')\n plt.savefig(join(root_path, 'logs', 'ex{}_train_slice2.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(img_batch[0, :, :, 8]), cmap='gray')\n plt.savefig(join(root_path, 'logs', 'ex{}_train_slice3_main.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(mask_batch[0, :, :, 0]), alpha=0.15)\n plt.savefig(join(root_path, 'logs', 'ex{}_train_label.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(img_batch[0, :, :, 12]), cmap='gray')\n plt.savefig(join(root_path, 'logs', 'ex{}_train_slice4.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(img_batch[0, :, :, 16]), cmap='gray')\n plt.savefig(join(root_path, 'logs', 'ex{}_train_slice5.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n '''elif img_batch.ndim == 5:\n plt.imshow(np.squeeze(img_batch[0, :, :, 0, 0]), cmap='gray')\n plt.imshow(np.squeeze(mask_batch[0, :, :, 0, 0]), alpha=0.15)\n plt.savefig(join(root_path, 'logs', 'ex_train.png'), format='png', bbox_inches='tight')\n plt.close()'''\n if net.find('caps') != -1: # if the network is capsule/segcaps structure\n mid_slice = input_slices // 2\n start_index = mid_slice * modalities\n img_batch_mid_slice = img_batch[:, :, :, start_index:start_index+modalities]\n\n mask_batch_masked = oneHot2LabelMax(mask_batch)\n mask_batch_masked[mask_batch_masked > 0.5] = 1.0 # Setting all other classes than background to mask\n mask_batch_masked = np.expand_dims(mask_batch_masked, axis=-1)\n mask_batch_masked_expand = np.repeat(mask_batch_masked, modalities, axis=-1)\n\n masked_img = mask_batch_masked_expand*img_batch_mid_slice\n\n '''plt.imshow(np.squeeze(img_batch[0, :, :, 0]), cmap='gray')\n plt.savefig(join(root_path, 'logs', '{}_img.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(mask_batch_masked[0, :, :, 0]), cmap='gray')\n plt.savefig(join(root_path, 'logs', '{}_mask_masked.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(mask_batch[0, :, :, 0]), cmap='gray')\n plt.savefig(join(root_path, 'logs', '{}_mask.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()\n plt.imshow(np.squeeze(masked_img[0, :, :, 0]), cmap='gray')\n plt.savefig(join(root_path, 'logs', '{}_masked_img.png'.format(j)), format='png', bbox_inches='tight')\n plt.close()'''\n yield ([img_batch, mask_batch_masked], [mask_batch, masked_img])\n else:\n yield (img_batch, mask_batch)\n if count != 0:\n #if 
aug_data:\n # img_batch[:count,...], mask_batch[:count,...] = augmentImages(img_batch[:count,...],\n # mask_batch[:count,...])\n if net.find('caps') != -1: \n sub_img_batch = img_batch[:count, ...]\n sub_mask_batch = mask_batch[:count, ...]\n \n mid_slice = input_slices // 2\n start_index = mid_slice * modalities\n img_batch_mid_slice = sub_img_batch[:, :, :, start_index:start_index+modalities]\n\n mask_batch_masked = oneHot2LabelMax(sub_mask_batch)\n mask_batch_masked[mask_batch_masked > 0.5] = 1.0 # Setting all other classes than background to mask\n mask_batch_masked = np.expand_dims(mask_batch_masked, axis=-1)\n mask_batch_masked_expand = np.repeat(mask_batch_masked, modalities, axis=-1)\n\n masked_img = mask_batch_masked_expand*img_batch_mid_slice\n yield ([sub_img_batch, mask_batch_masked], [sub_mask_batch, masked_img])\n else:\n yield (img_batch[:count,...], mask_batch[:count,...])\n\n@threadsafe_generator\ndef generate_val_batches(root_path, val_list, net_input_shape, net, batchSize=1, numSlices=1, subSampAmt=-1,\n stride=1, downSampAmt=1, shuff=1, dataset = 'brats', num_output_classes=2):\n # Create placeholders for validation\n\n modalities = net_input_shape[2] // numSlices\n input_slices = numSlices\n img_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.float32)\n mask_shape = [net_input_shape[0],net_input_shape[1], num_output_classes]\n mask_batch = np.zeros((np.concatenate(((batchSize,), mask_shape))), dtype=np.float32)\n\n if dataset == 'brats':\n np_converter = convert_brats_data_to_numpy\n frame_pixels_0 = 8\n frame_pixels_1 = -8\n empty_mask = np.array([one_hot_max, 1-one_hot_max, 1-one_hot_max, 1-one_hot_max])\n raw_x_shape = 240\n raw_y_shape = 240\n elif dataset in ['heart', 'spleen', 'colon', 'hepatic', 'pancreas']:\n np_converter = get_np_converter(dataset)\n frame_pixels_0 = 0\n frame_pixels_1 = net_input_shape[0]\n if num_output_classes == 2:\n empty_mask = np.array([one_hot_max, 1-one_hot_max])\n else:\n empty_mask = np.array([1-one_hot_max])\n raw_x_shape = net_input_shape[0]\n raw_y_shape = net_input_shape[1]\n else:\n assert False, 'Dataset not recognized'\n\n is_binary_classification = num_output_classes == 1\n \n while True:\n if shuff:\n shuffle(val_list)\n count = 0\n for i, scan_name in enumerate(val_list):\n try:\n scan_name = scan_name[0]\n path_to_np = join(root_path,'np_files',basename(scan_name)[:-6]+'npz')\n with np.load(path_to_np) as data:\n val_img = data['img']\n val_mask = data['mask']\n except:\n print('\\nPre-made numpy array not found for {}.\\nCreating now...'.format(scan_name[:-7]))\n val_img, val_mask = np_converter(root_path, scan_name, num_classes=num_output_classes)\n if np.array_equal(val_img,np.zeros(1)):\n continue\n else:\n print('\\nFinished making npz file.')\n\n if numSlices == 1:\n sideSlices = 0\n else:\n if numSlices % 2 != 0:\n numSlices -= 1\n sideSlices = numSlices / 2\n\n z_shape = val_img.shape[2]\n indicies = np.arange(0, z_shape, stride)\n\n if shuff:\n shuffle(indicies)\n\n for j in indicies:\n if (is_binary_classification and np.sum(val_mask[:, :, j]) < 1) or (not is_binary_classification and np.sum(val_mask[:, :, j, 1:]) < 1):\n continue\n if img_batch.ndim == 4:\n img_batch[count] = 0\n next_img = val_img[:, :, max(j-sideSlices,0):min(j+sideSlices+1,z_shape)].reshape(raw_x_shape, raw_y_shape, -1)\n insertion_index = -modalities\n img_index = 0\n for k in range(j-sideSlices, j+sideSlices+1):\n insertion_index += modalities\n if (k < 0): continue\n if (k >= z_shape): break\n img_batch[count, 
frame_pixels_0:frame_pixels_1, frame_pixels_0:frame_pixels_1, insertion_index:insertion_index+modalities] = next_img[:, :, img_index:img_index+modalities]\n img_index += modalities\n\n mask_batch[count] = empty_mask\n mask_batch[count, frame_pixels_0:frame_pixels_1, frame_pixels_0:frame_pixels_1, :] = val_mask[:, :, j]\n else:\n print('\\nError this function currently only supports 2D and 3D data.')\n exit(0)\n\n count += 1\n if count % batchSize == 0:\n count = 0\n if net.find('caps') != -1: # if the network is capsule/segcaps structure \n mid_slice = input_slices // 2\n start_index = mid_slice * modalities\n img_batch_mid_slice = img_batch[:, :, :, start_index:start_index+modalities]\n\n mask_batch_masked = oneHot2LabelMax(mask_batch)\n mask_batch_masked[mask_batch_masked > 0.5] = 1.0 # Setting all other classes than background to mask\n mask_batch_masked = np.expand_dims(mask_batch_masked, axis=-1)\n mask_batch_masked_expand = np.repeat(mask_batch_masked, modalities, axis=-1)\n\n masked_img = mask_batch_masked_expand*img_batch_mid_slice\n yield ([img_batch, mask_batch_masked], [mask_batch, masked_img])\n else:\n yield (img_batch, mask_batch)\n\n if count != 0:\n #if aug_data:\n # img_batch[:count,...], mask_batch[:count,...] = augmentImages(img_batch[:count,...],\n # mask_batch[:count,...])\n if net.find('caps') != -1: \n sub_img_batch = img_batch[:count, ...]\n sub_mask_batch = mask_batch[:count, ...]\n \n mid_slice = input_slices // 2\n start_index = mid_slice * modalities\n img_batch_mid_slice = sub_img_batch[:, :, :, start_index:start_index+modalities]\n\n mask_batch_masked = oneHot2LabelMax(sub_mask_batch)\n mask_batch_masked[mask_batch_masked > 0.5] = 1.0 # Setting all other classes than background to mask\n mask_batch_masked = np.expand_dims(mask_batch_masked, axis=-1)\n mask_batch_masked_expand = np.repeat(mask_batch_masked, modalities, axis=-1)\n\n masked_img = mask_batch_masked_expand*img_batch_mid_slice\n yield ([sub_img_batch, mask_batch_masked], [sub_mask_batch, masked_img])\n else:\n yield (img_batch[:count,...], mask_batch[:count,...])\n\n@threadsafe_generator\ndef generate_test_batches(root_path, test_list, net_input_shape, batchSize=1, numSlices=1, subSampAmt=0,\n stride=1, downSampAmt=1, dataset = 'brats', num_output_classes=2):\n # Create placeholders for testing\n print('Generate test batches for ' + str(dataset))\n print('\\nload_3D_data.generate_test_batches')\n print(\"Batch size {}\".format(batchSize))\n img_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.float32)\n modalities = net_input_shape[2] // numSlices\n count = 0\n print('\\nload_3D_data.generate_test_batches: test_list=%s'%(test_list))\n\n if dataset == 'brats':\n np_converter = convert_brats_data_to_numpy\n frame_pixels_0 = 8\n frame_pixels_1 = -8\n raw_x_shape = 240\n raw_y_shape = 240\n elif dataset in ['heart', 'spleen', 'colon', 'hepatic', 'pancreas']:\n np_converter = get_np_converter(dataset)\n frame_pixels_0 = 0\n frame_pixels_1 = net_input_shape[0]\n raw_x_shape = net_input_shape[0]\n raw_y_shape = net_input_shape[1]\n else:\n assert False, 'Dataset not recognized'\n\n for i, scan_name in enumerate(test_list):\n try:\n scan_name = scan_name[0]\n path_to_np = join(root_path,'np_files',basename(scan_name)[:-6]+'npz')\n print(path_to_np)\n with np.load(path_to_np) as data:\n test_img = data['img']\n except Exception as err:\n print(err)\n print('\\nPre-made numpy array not found for {}.\\nCreating now...'.format(scan_name[:-7]))\n test_img = np_converter(root_path, 
scan_name, no_masks=False, num_classes=num_output_classes)[0]\n if np.array_equal(test_img,np.zeros(1)):\n continue\n else:\n print('\\nFinished making npz file.')\n\n if numSlices == 1:\n sideSlices = 0\n else:\n if numSlices % 2 != 0:\n numSlices -= 1\n sideSlices = numSlices / 2\n\n z_shape = test_img.shape[2]\n indicies = np.arange(0, z_shape, stride)\n\n for j in indicies:\n if img_batch.ndim == 4:\n img_batch[count] = 0\n next_img = test_img[:, :, max(j-sideSlices,0):min(j+sideSlices+1,z_shape)].reshape(raw_x_shape, raw_y_shape, -1)\n insertion_index = -modalities\n img_index = 0\n for k in range(j-sideSlices, j+sideSlices+1):\n insertion_index += modalities\n if (k < 0): continue\n if (k >= z_shape): break\n img_batch[count, frame_pixels_0:frame_pixels_1, frame_pixels_0:frame_pixels_1, insertion_index:insertion_index+modalities] = next_img[:, :, img_index:img_index+modalities]\n img_index += modalities\n elif img_batch.ndim == 5:\n # Assumes img and mask are single channel. Replace 0 with : if multi-channel.\n img_batch[count, frame_pixels_0:frame_pixels_1, frame_pixels_0:frame_pixels_1, :, :] = test_img[:, :, j : j+numSlices]\n else:\n print('Error this function currently only supports 2D and 3D data.')\n exit(0)\n\n count += 1\n if count % batchSize == 0:\n count = 0\n yield (img_batch)\n\n if count != 0:\n yield (img_batch[:count,:,:,:])\n"
] |
[
[
"numpy.expand_dims",
"matplotlib.use",
"numpy.arange",
"numpy.squeeze",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.KFold",
"numpy.save",
"numpy.concatenate",
"matplotlib.pyplot.ioff",
"numpy.random.shuffle",
"numpy.any",
"numpy.count_nonzero",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
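Editorial note on the record above (not part of the dataset row): the serialized generator module wraps its batch generators in a thread-safety layer written against the Python 2 iterator protocol (`self.it.next()`), and the window half-width is computed with true division (`sideSlices = numSlices / 2`), which yields a float under Python 3. Below is a minimal, hedged Python 3 sketch of the same wrapping pattern; class and function names mirror the record only for readability and are not the repository's actual implementation.

    # Illustrative sketch only: a Python 3 thread-safe generator wrapper in the
    # spirit of the record's threadsafe_iter / threadsafe_generator pair.
    import threading

    class ThreadsafeIter:
        """Serialize calls to next() on a shared iterator/generator."""
        def __init__(self, it):
            self.it = it
            self.lock = threading.Lock()

        def __iter__(self):
            return self

        def __next__(self):              # Python 3 iterator protocol
            with self.lock:
                return next(self.it)     # builtin next() instead of it.next()

    def threadsafe_generator(f):
        """Decorator: make a generator function safe to share across worker threads."""
        def g(*args, **kwargs):
            return ThreadsafeIter(f(*args, **kwargs))
        return g

    # Usage sketch: multiple threads can pull batches without interleaved state.
    @threadsafe_generator
    def count_up(n):
        for i in range(n):
            yield i

    if __name__ == "__main__":
        print(list(count_up(3)))  # [0, 1, 2]

A related Python 3 caveat in the record's slice-window logic: `numSlices / 2` would need floor division (`numSlices // 2`) before being used as a `range()` bound or slice index.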
daniellebrown/pandas
|
[
"bd00a1c8e6dc778ffc2b9271b21942e4b793c6d1"
] |
[
"pandas/tests/frame/test_constructors.py"
] |
[
"from collections import OrderedDict, abc\nfrom datetime import datetime, timedelta\nimport functools\nimport itertools\n\nimport numpy as np\nimport numpy.ma as ma\nimport numpy.ma.mrecords as mrecords\nimport pytest\n\nfrom pandas.compat import is_platform_little_endian\n\nfrom pandas.core.dtypes.common import is_integer_dtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n MultiIndex,\n RangeIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n isna,\n)\nfrom pandas.core.construction import create_series_with_explicit_dtype\nimport pandas.util.testing as tm\n\nMIXED_FLOAT_DTYPES = [\"float16\", \"float32\", \"float64\"]\nMIXED_INT_DTYPES = [\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n]\n\n\nclass TestDataFrameConstructors:\n @pytest.mark.parametrize(\n \"constructor\",\n [\n lambda: DataFrame(),\n lambda: DataFrame(None),\n lambda: DataFrame({}),\n lambda: DataFrame(()),\n lambda: DataFrame([]),\n lambda: DataFrame((_ for _ in [])),\n lambda: DataFrame(range(0)),\n lambda: DataFrame(data=None),\n lambda: DataFrame(data={}),\n lambda: DataFrame(data=()),\n lambda: DataFrame(data=[]),\n lambda: DataFrame(data=(_ for _ in [])),\n lambda: DataFrame(data=range(0)),\n ],\n )\n def test_empty_constructor(self, constructor):\n expected = DataFrame()\n result = constructor()\n assert len(result.index) == 0\n assert len(result.columns) == 0\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"emptylike,expected_index,expected_columns\",\n [\n ([[]], RangeIndex(1), RangeIndex(0)),\n ([[], []], RangeIndex(2), RangeIndex(0)),\n ([(_ for _ in [])], RangeIndex(1), RangeIndex(0)),\n ],\n )\n def test_emptylike_constructor(self, emptylike, expected_index, expected_columns):\n expected = DataFrame(index=expected_index, columns=expected_columns)\n result = DataFrame(emptylike)\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_mixed(self, float_string_frame):\n index, data = tm.getMixedTypeDict()\n\n # TODO(wesm), incomplete test?\n indexed_frame = DataFrame(data, index=index) # noqa\n unindexed_frame = DataFrame(data) # noqa\n\n assert float_string_frame[\"foo\"].dtype == np.object_\n\n def test_constructor_cast_failure(self):\n foo = DataFrame({\"a\": [\"a\", \"b\", \"c\"]}, dtype=np.float64)\n assert foo[\"a\"].dtype == object\n\n # GH 3010, constructing with odd arrays\n df = DataFrame(np.ones((4, 2)))\n\n # this is ok\n df[\"foo\"] = np.ones((4, 2)).tolist()\n\n # this is not ok\n msg = \"Wrong number of items passed 2, placement implies 1\"\n with pytest.raises(ValueError, match=msg):\n df[\"test\"] = np.ones((4, 2))\n\n # this is ok\n df[\"foo2\"] = np.ones((4, 2)).tolist()\n\n def test_constructor_dtype_copy(self):\n orig_df = DataFrame({\"col1\": [1.0], \"col2\": [2.0], \"col3\": [3.0]})\n\n new_df = pd.DataFrame(orig_df, dtype=float, copy=True)\n\n new_df[\"col1\"] = 200.0\n assert orig_df[\"col1\"][0] == 1.0\n\n def test_constructor_dtype_nocast_view(self):\n df = DataFrame([[1, 2]])\n should_be_view = DataFrame(df, dtype=df[0].dtype)\n should_be_view[0][0] = 99\n assert df.values[0, 0] == 99\n\n should_be_view = DataFrame(df.values, dtype=df[0].dtype)\n should_be_view[0][0] = 97\n assert df.values[0, 0] == 97\n\n def test_constructor_dtype_list_data(self):\n df = DataFrame([[1, \"2\"], [None, \"a\"]], dtype=object)\n assert df.loc[1, 0] is None\n assert df.loc[0, 1] == \"2\"\n\n def test_constructor_list_frames(self):\n # see gh-3243\n result = 
DataFrame([DataFrame()])\n assert result.shape == (1, 0)\n\n result = DataFrame([DataFrame(dict(A=np.arange(5)))])\n assert isinstance(result.iloc[0, 0], DataFrame)\n\n def test_constructor_mixed_dtypes(self):\n def _make_mixed_dtypes_df(typ, ad=None):\n\n if typ == \"int\":\n dtypes = MIXED_INT_DTYPES\n arrays = [np.array(np.random.rand(10), dtype=d) for d in dtypes]\n elif typ == \"float\":\n dtypes = MIXED_FLOAT_DTYPES\n arrays = [\n np.array(np.random.randint(10, size=10), dtype=d) for d in dtypes\n ]\n\n for d, a in zip(dtypes, arrays):\n assert a.dtype == d\n if ad is None:\n ad = dict()\n ad.update({d: a for d, a in zip(dtypes, arrays)})\n return DataFrame(ad)\n\n def _check_mixed_dtypes(df, dtypes=None):\n if dtypes is None:\n dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES\n for d in dtypes:\n if d in df:\n assert df.dtypes[d] == d\n\n # mixed floating and integer coexist in the same frame\n df = _make_mixed_dtypes_df(\"float\")\n _check_mixed_dtypes(df)\n\n # add lots of types\n df = _make_mixed_dtypes_df(\"float\", dict(A=1, B=\"foo\", C=\"bar\"))\n _check_mixed_dtypes(df)\n\n # GH 622\n df = _make_mixed_dtypes_df(\"int\")\n _check_mixed_dtypes(df)\n\n def test_constructor_complex_dtypes(self):\n # GH10952\n a = np.random.rand(10).astype(np.complex64)\n b = np.random.rand(10).astype(np.complex128)\n\n df = DataFrame({\"a\": a, \"b\": b})\n assert a.dtype == df.a.dtype\n assert b.dtype == df.b.dtype\n\n def test_constructor_dtype_str_na_values(self, string_dtype):\n # https://github.com/pandas-dev/pandas/issues/21083\n df = DataFrame({\"A\": [\"x\", None]}, dtype=string_dtype)\n result = df.isna()\n expected = DataFrame({\"A\": [False, True]})\n tm.assert_frame_equal(result, expected)\n assert df.iloc[1, 0] is None\n\n df = DataFrame({\"A\": [\"x\", np.nan]}, dtype=string_dtype)\n assert np.isnan(df.iloc[1, 0])\n\n def test_constructor_rec(self, float_frame):\n rec = float_frame.to_records(index=False)\n rec.dtype.names = list(rec.dtype.names)[::-1]\n\n index = float_frame.index\n\n df = DataFrame(rec)\n tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))\n\n df2 = DataFrame(rec, index=index)\n tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))\n tm.assert_index_equal(df2.index, index)\n\n rng = np.arange(len(rec))[::-1]\n df3 = DataFrame(rec, index=rng, columns=[\"C\", \"B\"])\n expected = DataFrame(rec, index=rng).reindex(columns=[\"C\", \"B\"])\n tm.assert_frame_equal(df3, expected)\n\n def test_constructor_bool(self):\n df = DataFrame({0: np.ones(10, dtype=bool), 1: np.zeros(10, dtype=bool)})\n assert df.values.dtype == np.bool_\n\n def test_constructor_overflow_int64(self):\n # see gh-14881\n values = np.array([2 ** 64 - i for i in range(1, 10)], dtype=np.uint64)\n\n result = DataFrame({\"a\": values})\n assert result[\"a\"].dtype == np.uint64\n\n # see gh-2355\n data_scores = [\n (6311132704823138710, 273),\n (2685045978526272070, 23),\n (8921811264899370420, 45),\n (17019687244989530680, 270),\n (9930107427299601010, 273),\n ]\n dtype = [(\"uid\", \"u8\"), (\"score\", \"u8\")]\n data = np.zeros((len(data_scores),), dtype=dtype)\n data[:] = data_scores\n df_crawls = DataFrame(data)\n assert df_crawls[\"uid\"].dtype == np.uint64\n\n @pytest.mark.parametrize(\n \"values\",\n [\n np.array([2 ** 64], dtype=object),\n np.array([2 ** 65]),\n [2 ** 64 + 1],\n np.array([-(2 ** 63) - 4], dtype=object),\n np.array([-(2 ** 64) - 1]),\n [-(2 ** 65) - 2],\n ],\n )\n def test_constructor_int_overflow(self, values):\n # see gh-18584\n value = values[0]\n result = 
DataFrame(values)\n\n assert result[0].dtype == object\n assert result[0][0] == value\n\n def test_constructor_ordereddict(self):\n import random\n\n nitems = 100\n nums = list(range(nitems))\n random.shuffle(nums)\n expected = [\"A{i:d}\".format(i=i) for i in nums]\n df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))\n assert expected == list(df.columns)\n\n def test_constructor_dict(self):\n datetime_series = tm.makeTimeSeries(nper=30)\n # test expects index shifted by 5\n datetime_series_short = tm.makeTimeSeries(nper=30)[5:]\n\n frame = DataFrame({\"col1\": datetime_series, \"col2\": datetime_series_short})\n\n # col2 is padded with NaN\n assert len(datetime_series) == 30\n assert len(datetime_series_short) == 25\n\n tm.assert_series_equal(frame[\"col1\"], datetime_series.rename(\"col1\"))\n\n exp = pd.Series(\n np.concatenate([[np.nan] * 5, datetime_series_short.values]),\n index=datetime_series.index,\n name=\"col2\",\n )\n tm.assert_series_equal(exp, frame[\"col2\"])\n\n frame = DataFrame(\n {\"col1\": datetime_series, \"col2\": datetime_series_short},\n columns=[\"col2\", \"col3\", \"col4\"],\n )\n\n assert len(frame) == len(datetime_series_short)\n assert \"col1\" not in frame\n assert isna(frame[\"col3\"]).all()\n\n # Corner cases\n assert len(DataFrame()) == 0\n\n # mix dict and array, wrong size - no spec for which error should raise\n # first\n with pytest.raises(ValueError):\n DataFrame({\"A\": {\"a\": \"a\", \"b\": \"b\"}, \"B\": [\"a\", \"b\", \"c\"]})\n\n # Length-one dict micro-optimization\n frame = DataFrame({\"A\": {\"1\": 1, \"2\": 2}})\n tm.assert_index_equal(frame.index, pd.Index([\"1\", \"2\"]))\n\n # empty dict plus index\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx)\n assert frame.index is idx\n\n # empty dict with index and columns\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx, columns=idx)\n assert frame.index is idx\n assert frame.columns is idx\n assert len(frame._series) == 3\n\n # with dict of empty list and Series\n frame = DataFrame({\"A\": [], \"B\": []}, columns=[\"A\", \"B\"])\n tm.assert_index_equal(frame.index, Index([], dtype=np.int64))\n\n # GH 14381\n # Dict with None value\n frame_none = DataFrame(dict(a=None), index=[0])\n frame_none_list = DataFrame(dict(a=[None]), index=[0])\n assert frame_none._get_value(0, \"a\") is None\n assert frame_none_list._get_value(0, \"a\") is None\n tm.assert_frame_equal(frame_none, frame_none_list)\n\n # GH10856\n # dict with scalar values should raise error, even if columns passed\n msg = \"If using all scalar values, you must pass an index\"\n with pytest.raises(ValueError, match=msg):\n DataFrame({\"a\": 0.7})\n\n with pytest.raises(ValueError, match=msg):\n DataFrame({\"a\": 0.7}, columns=[\"a\"])\n\n @pytest.mark.parametrize(\"scalar\", [2, np.nan, None, \"D\"])\n def test_constructor_invalid_items_unused(self, scalar):\n # No error if invalid (scalar) value is in fact not used:\n result = DataFrame({\"a\": scalar}, columns=[\"b\"])\n expected = DataFrame(columns=[\"b\"])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"value\", [2, np.nan, None, float(\"nan\")])\n def test_constructor_dict_nan_key(self, value):\n # GH 18455\n cols = [1, value, 3]\n idx = [\"a\", value]\n values = [[0, 3], [1, 4], [2, 5]]\n data = {cols[c]: Series(values[c], index=idx) for c in range(3)}\n result = DataFrame(data).sort_values(1).sort_values(\"a\", axis=1)\n expected = DataFrame(\n np.arange(6, dtype=\"int64\").reshape(2, 3), index=idx, columns=cols\n )\n 
tm.assert_frame_equal(result, expected)\n\n result = DataFrame(data, index=idx).sort_values(\"a\", axis=1)\n tm.assert_frame_equal(result, expected)\n\n result = DataFrame(data, index=idx, columns=cols)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"value\", [np.nan, None, float(\"nan\")])\n def test_constructor_dict_nan_tuple_key(self, value):\n # GH 18455\n cols = Index([(11, 21), (value, 22), (13, value)])\n idx = Index([(\"a\", value), (value, 2)])\n values = [[0, 3], [1, 4], [2, 5]]\n data = {cols[c]: Series(values[c], index=idx) for c in range(3)}\n result = DataFrame(data).sort_values((11, 21)).sort_values((\"a\", value), axis=1)\n expected = DataFrame(\n np.arange(6, dtype=\"int64\").reshape(2, 3), index=idx, columns=cols\n )\n tm.assert_frame_equal(result, expected)\n\n result = DataFrame(data, index=idx).sort_values((\"a\", value), axis=1)\n tm.assert_frame_equal(result, expected)\n\n result = DataFrame(data, index=idx, columns=cols)\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_dict_order_insertion(self):\n datetime_series = tm.makeTimeSeries(nper=30)\n datetime_series_short = tm.makeTimeSeries(nper=25)\n\n # GH19018\n # initialization ordering: by insertion order if python>= 3.6\n d = {\"b\": datetime_series_short, \"a\": datetime_series}\n frame = DataFrame(data=d)\n expected = DataFrame(data=d, columns=list(\"ba\"))\n tm.assert_frame_equal(frame, expected)\n\n def test_constructor_multi_index(self):\n # GH 4078\n # construction error with mi and all-nan frame\n tuples = [(2, 3), (3, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi, columns=mi)\n assert pd.isna(df).values.ravel().all()\n\n tuples = [(3, 3), (2, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi, columns=mi)\n assert pd.isna(df).values.ravel().all()\n\n def test_constructor_2d_index(self):\n # GH 25416\n # handling of 2d index in construction\n df = pd.DataFrame([[1]], columns=[[1]], index=[1, 2])\n expected = pd.DataFrame(\n [1, 1],\n index=pd.Int64Index([1, 2], dtype=\"int64\"),\n columns=pd.MultiIndex(levels=[[1]], codes=[[0]]),\n )\n tm.assert_frame_equal(df, expected)\n\n df = pd.DataFrame([[1]], columns=[[1]], index=[[1, 2]])\n expected = pd.DataFrame(\n [1, 1],\n index=pd.MultiIndex(levels=[[1, 2]], codes=[[0, 1]]),\n columns=pd.MultiIndex(levels=[[1]], codes=[[0]]),\n )\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_error_msgs(self):\n msg = \"Empty data passed with indices specified.\"\n # passing an empty array with columns specified.\n with pytest.raises(ValueError, match=msg):\n DataFrame(np.empty(0), columns=list(\"abc\"))\n\n msg = \"Mixing dicts with non-Series may lead to ambiguous ordering.\"\n # mix dict and array, wrong size\n with pytest.raises(ValueError, match=msg):\n DataFrame({\"A\": {\"a\": \"a\", \"b\": \"b\"}, \"B\": [\"a\", \"b\", \"c\"]})\n\n # wrong size ndarray, GH 3105\n msg = r\"Shape of passed values is \\(4, 3\\), indices imply \\(3, 3\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(\n np.arange(12).reshape((4, 3)),\n columns=[\"foo\", \"bar\", \"baz\"],\n index=pd.date_range(\"2000-01-01\", periods=3),\n )\n\n arr = np.array([[4, 5, 6]])\n msg = r\"Shape of passed values is \\(1, 3\\), indices imply \\(1, 4\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(index=[0], columns=range(0, 4), data=arr)\n\n arr = np.array([4, 5, 6])\n msg = r\"Shape of passed values is \\(3, 1\\), indices imply \\(1, 4\\)\"\n with pytest.raises(ValueError, 
match=msg):\n DataFrame(index=[0], columns=range(0, 4), data=arr)\n\n # higher dim raise exception\n with pytest.raises(ValueError, match=\"Must pass 2-d input\"):\n DataFrame(np.zeros((3, 3, 3)), columns=[\"A\", \"B\", \"C\"], index=[1])\n\n # wrong size axis labels\n msg = \"Shape of passed values \" r\"is \\(2, 3\\), indices \" r\"imply \\(1, 3\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(np.random.rand(2, 3), columns=[\"A\", \"B\", \"C\"], index=[1])\n\n msg = \"Shape of passed values \" r\"is \\(2, 3\\), indices \" r\"imply \\(2, 2\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(np.random.rand(2, 3), columns=[\"A\", \"B\"], index=[1, 2])\n\n # gh-26429\n msg = \"2 columns passed, passed data had 10 columns\"\n with pytest.raises(ValueError, match=msg):\n DataFrame((range(10), range(10, 20)), columns=(\"ones\", \"twos\"))\n\n msg = \"If using all scalar values, you must pass an index\"\n with pytest.raises(ValueError, match=msg):\n DataFrame({\"a\": False, \"b\": True})\n\n def test_constructor_with_embedded_frames(self):\n\n # embedded data frames\n df1 = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]})\n df2 = DataFrame([df1, df1 + 10])\n\n df2.dtypes\n str(df2)\n\n result = df2.loc[0, 0]\n tm.assert_frame_equal(result, df1)\n\n result = df2.loc[1, 0]\n tm.assert_frame_equal(result, df1 + 10)\n\n def test_constructor_subclass_dict(self, float_frame):\n # Test for passing dict subclass to constructor\n data = {\n \"col1\": tm.TestSubDict((x, 10.0 * x) for x in range(10)),\n \"col2\": tm.TestSubDict((x, 20.0 * x) for x in range(10)),\n }\n df = DataFrame(data)\n refdf = DataFrame({col: dict(val.items()) for col, val in data.items()})\n tm.assert_frame_equal(refdf, df)\n\n data = tm.TestSubDict(data.items())\n df = DataFrame(data)\n tm.assert_frame_equal(refdf, df)\n\n # try with defaultdict\n from collections import defaultdict\n\n data = {}\n float_frame[\"B\"][:10] = np.nan\n for k, v in float_frame.items():\n dct = defaultdict(dict)\n dct.update(v.to_dict())\n data[k] = dct\n frame = DataFrame(data)\n expected = frame.reindex(index=float_frame.index)\n tm.assert_frame_equal(float_frame, expected)\n\n def test_constructor_dict_block(self):\n expected = np.array([[4.0, 3.0, 2.0, 1.0]])\n df = DataFrame(\n {\"d\": [4.0], \"c\": [3.0], \"b\": [2.0], \"a\": [1.0]},\n columns=[\"d\", \"c\", \"b\", \"a\"],\n )\n tm.assert_numpy_array_equal(df.values, expected)\n\n def test_constructor_dict_cast(self):\n # cast float tests\n test_data = {\"A\": {\"1\": 1, \"2\": 2}, \"B\": {\"1\": \"1\", \"2\": \"2\", \"3\": \"3\"}}\n frame = DataFrame(test_data, dtype=float)\n assert len(frame) == 3\n assert frame[\"B\"].dtype == np.float64\n assert frame[\"A\"].dtype == np.float64\n\n frame = DataFrame(test_data)\n assert len(frame) == 3\n assert frame[\"B\"].dtype == np.object_\n assert frame[\"A\"].dtype == np.float64\n\n # can't cast to float\n test_data = {\n \"A\": dict(zip(range(20), tm.makeStringIndex(20))),\n \"B\": dict(zip(range(15), np.random.randn(15))),\n }\n frame = DataFrame(test_data, dtype=float)\n assert len(frame) == 20\n assert frame[\"A\"].dtype == np.object_\n assert frame[\"B\"].dtype == np.float64\n\n def test_constructor_dict_dont_upcast(self):\n d = {\"Col1\": {\"Row1\": \"A String\", \"Row2\": np.nan}}\n df = DataFrame(d)\n assert isinstance(df[\"Col1\"][\"Row2\"], float)\n\n dm = DataFrame([[1, 2], [\"a\", \"b\"]], index=[1, 2], columns=[1, 2])\n assert isinstance(dm[1][1], int)\n\n def test_constructor_dict_of_tuples(self):\n # GH #1491\n data = 
{\"a\": (1, 2, 3), \"b\": (4, 5, 6)}\n\n result = DataFrame(data)\n expected = DataFrame({k: list(v) for k, v in data.items()})\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_dict_of_ranges(self):\n # GH 26356\n data = {\"a\": range(3), \"b\": range(3, 6)}\n\n result = DataFrame(data)\n expected = DataFrame({\"a\": [0, 1, 2], \"b\": [3, 4, 5]})\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_dict_of_iterators(self):\n # GH 26349\n data = {\"a\": iter(range(3)), \"b\": reversed(range(3))}\n\n result = DataFrame(data)\n expected = DataFrame({\"a\": [0, 1, 2], \"b\": [2, 1, 0]})\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_dict_of_generators(self):\n # GH 26349\n data = {\"a\": (i for i in (range(3))), \"b\": (i for i in reversed(range(3)))}\n result = DataFrame(data)\n expected = DataFrame({\"a\": [0, 1, 2], \"b\": [2, 1, 0]})\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_dict_multiindex(self):\n def check(result, expected):\n return tm.assert_frame_equal(\n result,\n expected,\n check_dtype=True,\n check_index_type=True,\n check_column_type=True,\n check_names=True,\n )\n\n d = {\n (\"a\", \"a\"): {(\"i\", \"i\"): 0, (\"i\", \"j\"): 1, (\"j\", \"i\"): 2},\n (\"b\", \"a\"): {(\"i\", \"i\"): 6, (\"i\", \"j\"): 5, (\"j\", \"i\"): 4},\n (\"b\", \"c\"): {(\"i\", \"i\"): 7, (\"i\", \"j\"): 8, (\"j\", \"i\"): 9},\n }\n _d = sorted(d.items())\n df = DataFrame(d)\n expected = DataFrame(\n [x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d])\n ).T\n expected.index = MultiIndex.from_tuples(expected.index)\n check(df, expected)\n\n d[\"z\"] = {\"y\": 123.0, (\"i\", \"i\"): 111, (\"i\", \"j\"): 111, (\"j\", \"i\"): 111}\n _d.insert(0, (\"z\", d[\"z\"]))\n expected = DataFrame(\n [x[1] for x in _d], index=Index([x[0] for x in _d], tupleize_cols=False)\n ).T\n expected.index = Index(expected.index, tupleize_cols=False)\n df = DataFrame(d)\n df = df.reindex(columns=expected.columns, index=expected.index)\n check(df, expected)\n\n def test_constructor_dict_datetime64_index(self):\n # GH 10160\n dates_as_str = [\"1984-02-19\", \"1988-11-06\", \"1989-12-03\", \"1990-03-15\"]\n\n def create_data(constructor):\n return {i: {constructor(s): 2 * i} for i, s in enumerate(dates_as_str)}\n\n data_datetime64 = create_data(np.datetime64)\n data_datetime = create_data(lambda x: datetime.strptime(x, \"%Y-%m-%d\"))\n data_Timestamp = create_data(Timestamp)\n\n expected = DataFrame(\n [\n {0: 0, 1: None, 2: None, 3: None},\n {0: None, 1: 2, 2: None, 3: None},\n {0: None, 1: None, 2: 4, 3: None},\n {0: None, 1: None, 2: None, 3: 6},\n ],\n index=[Timestamp(dt) for dt in dates_as_str],\n )\n\n result_datetime64 = DataFrame(data_datetime64)\n result_datetime = DataFrame(data_datetime)\n result_Timestamp = DataFrame(data_Timestamp)\n tm.assert_frame_equal(result_datetime64, expected)\n tm.assert_frame_equal(result_datetime, expected)\n tm.assert_frame_equal(result_Timestamp, expected)\n\n def test_constructor_dict_timedelta64_index(self):\n # GH 10160\n td_as_int = [1, 2, 3, 4]\n\n def create_data(constructor):\n return {i: {constructor(s): 2 * i} for i, s in enumerate(td_as_int)}\n\n data_timedelta64 = create_data(lambda x: np.timedelta64(x, \"D\"))\n data_timedelta = create_data(lambda x: timedelta(days=x))\n data_Timedelta = create_data(lambda x: Timedelta(x, \"D\"))\n\n expected = DataFrame(\n [\n {0: 0, 1: None, 2: None, 3: None},\n {0: None, 1: 2, 2: None, 3: None},\n {0: None, 1: None, 2: 4, 3: None},\n 
{0: None, 1: None, 2: None, 3: 6},\n ],\n index=[Timedelta(td, \"D\") for td in td_as_int],\n )\n\n result_timedelta64 = DataFrame(data_timedelta64)\n result_timedelta = DataFrame(data_timedelta)\n result_Timedelta = DataFrame(data_Timedelta)\n tm.assert_frame_equal(result_timedelta64, expected)\n tm.assert_frame_equal(result_timedelta, expected)\n tm.assert_frame_equal(result_Timedelta, expected)\n\n def test_constructor_period(self):\n # PeriodIndex\n a = pd.PeriodIndex([\"2012-01\", \"NaT\", \"2012-04\"], freq=\"M\")\n b = pd.PeriodIndex([\"2012-02-01\", \"2012-03-01\", \"NaT\"], freq=\"D\")\n df = pd.DataFrame({\"a\": a, \"b\": b})\n assert df[\"a\"].dtype == a.dtype\n assert df[\"b\"].dtype == b.dtype\n\n # list of periods\n df = pd.DataFrame(\n {\"a\": a.astype(object).tolist(), \"b\": b.astype(object).tolist()}\n )\n assert df[\"a\"].dtype == a.dtype\n assert df[\"b\"].dtype == b.dtype\n\n def test_nested_dict_frame_constructor(self):\n rng = pd.period_range(\"1/1/2000\", periods=5)\n df = DataFrame(np.random.randn(10, 5), columns=rng)\n\n data = {}\n for col in df.columns:\n for row in df.index:\n data.setdefault(col, {})[row] = df._get_value(row, col)\n\n result = DataFrame(data, columns=rng)\n tm.assert_frame_equal(result, df)\n\n data = {}\n for col in df.columns:\n for row in df.index:\n data.setdefault(row, {})[col] = df._get_value(row, col)\n\n result = DataFrame(data, index=rng).T\n tm.assert_frame_equal(result, df)\n\n def _check_basic_constructor(self, empty):\n # mat: 2d matrix with shape (3, 2) to input. empty - makes sized\n # objects\n mat = empty((2, 3), dtype=float)\n # 2-D input\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n\n assert len(frame.index) == 2\n assert len(frame.columns) == 3\n\n # 1-D input\n frame = DataFrame(empty((3,)), columns=[\"A\"], index=[1, 2, 3])\n assert len(frame.index) == 3\n assert len(frame.columns) == 1\n\n # cast type\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2], dtype=np.int64)\n assert frame.values.dtype == np.int64\n\n # wrong size axis labels\n msg = r\"Shape of passed values is \\(2, 3\\), indices imply \\(1, 3\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1])\n msg = r\"Shape of passed values is \\(2, 3\\), indices imply \\(2, 2\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(mat, columns=[\"A\", \"B\"], index=[1, 2])\n\n # higher dim raise exception\n with pytest.raises(ValueError, match=\"Must pass 2-d input\"):\n DataFrame(empty((3, 3, 3)), columns=[\"A\", \"B\", \"C\"], index=[1])\n\n # automatic labeling\n frame = DataFrame(mat)\n tm.assert_index_equal(frame.index, pd.Int64Index(range(2)))\n tm.assert_index_equal(frame.columns, pd.Int64Index(range(3)))\n\n frame = DataFrame(mat, index=[1, 2])\n tm.assert_index_equal(frame.columns, pd.Int64Index(range(3)))\n\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"])\n tm.assert_index_equal(frame.index, pd.Int64Index(range(2)))\n\n # 0-length axis\n frame = DataFrame(empty((0, 3)))\n assert len(frame.index) == 0\n\n frame = DataFrame(empty((3, 0)))\n assert len(frame.columns) == 0\n\n def test_constructor_ndarray(self):\n self._check_basic_constructor(np.ones)\n\n frame = DataFrame([\"foo\", \"bar\"], index=[0, 1], columns=[\"A\"])\n assert len(frame) == 2\n\n def test_constructor_maskedarray(self):\n self._check_basic_constructor(ma.masked_all)\n\n # Check non-masked values\n mat = ma.masked_all((2, 3), dtype=float)\n mat[0, 0] = 1.0\n mat[1, 2] = 2.0\n frame 
= DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n assert 1.0 == frame[\"A\"][1]\n assert 2.0 == frame[\"C\"][2]\n\n # what is this even checking??\n mat = ma.masked_all((2, 3), dtype=float)\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n assert np.all(~np.asarray(frame == frame))\n\n def test_constructor_maskedarray_nonfloat(self):\n # masked int promoted to float\n mat = ma.masked_all((2, 3), dtype=int)\n # 2-D input\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n\n assert len(frame.index) == 2\n assert len(frame.columns) == 3\n assert np.all(~np.asarray(frame == frame))\n\n # cast type\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2], dtype=np.float64)\n assert frame.values.dtype == np.float64\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n assert 1 == frame[\"A\"][1]\n assert 2 == frame[\"C\"][2]\n\n # masked np.datetime64 stays (use NaT as null)\n mat = ma.masked_all((2, 3), dtype=\"M8[ns]\")\n # 2-D input\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n\n assert len(frame.index) == 2\n assert len(frame.columns) == 3\n assert isna(frame).values.all()\n\n # cast type\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2], dtype=np.int64)\n assert frame.values.dtype == np.int64\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n assert 1 == frame[\"A\"].view(\"i8\")[1]\n assert 2 == frame[\"C\"].view(\"i8\")[2]\n\n # masked bool promoted to object\n mat = ma.masked_all((2, 3), dtype=bool)\n # 2-D input\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n\n assert len(frame.index) == 2\n assert len(frame.columns) == 3\n assert np.all(~np.asarray(frame == frame))\n\n # cast type\n frame = DataFrame(mat, columns=[\"A\", \"B\", \"C\"], index=[1, 2], dtype=object)\n assert frame.values.dtype == object\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = True\n mat2[1, 2] = False\n frame = DataFrame(mat2, columns=[\"A\", \"B\", \"C\"], index=[1, 2])\n assert frame[\"A\"][1] is True\n assert frame[\"C\"][2] is False\n\n def test_constructor_maskedarray_hardened(self):\n # Check numpy masked arrays with hard masks -- from GH24574\n mat_hard = ma.masked_all((2, 2), dtype=float).harden_mask()\n result = pd.DataFrame(mat_hard, columns=[\"A\", \"B\"], index=[1, 2])\n expected = pd.DataFrame(\n {\"A\": [np.nan, np.nan], \"B\": [np.nan, np.nan]},\n columns=[\"A\", \"B\"],\n index=[1, 2],\n dtype=float,\n )\n tm.assert_frame_equal(result, expected)\n # Check case where mask is hard but no data are masked\n mat_hard = ma.ones((2, 2), dtype=float).harden_mask()\n result = pd.DataFrame(mat_hard, columns=[\"A\", \"B\"], index=[1, 2])\n expected = pd.DataFrame(\n {\"A\": [1.0, 1.0], \"B\": [1.0, 1.0]},\n columns=[\"A\", \"B\"],\n index=[1, 2],\n dtype=float,\n )\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_maskedrecarray_dtype(self):\n # Ensure constructor honors dtype\n data = np.ma.array(\n np.ma.zeros(5, dtype=[(\"date\", \"<f8\"), (\"price\", \"<f8\")]), mask=[False] * 5\n )\n data = data.view(mrecords.mrecarray)\n result = pd.DataFrame(data, dtype=int)\n expected = pd.DataFrame(np.zeros((5, 2), dtype=int), columns=[\"date\", \"price\"])\n tm.assert_frame_equal(result, expected)\n\n def 
test_constructor_mrecarray(self):\n # Ensure mrecarray produces frame identical to dict of masked arrays\n # from GH3479\n\n assert_fr_equal = functools.partial(\n tm.assert_frame_equal,\n check_index_type=True,\n check_column_type=True,\n check_frame_type=True,\n )\n arrays = [\n (\"float\", np.array([1.5, 2.0])),\n (\"int\", np.array([1, 2])),\n (\"str\", np.array([\"abc\", \"def\"])),\n ]\n for name, arr in arrays[:]:\n arrays.append(\n (\"masked1_\" + name, np.ma.masked_array(arr, mask=[False, True]))\n )\n arrays.append((\"masked_all\", np.ma.masked_all((2,))))\n arrays.append((\"masked_none\", np.ma.masked_array([1.0, 2.5], mask=False)))\n\n # call assert_frame_equal for all selections of 3 arrays\n for comb in itertools.combinations(arrays, 3):\n names, data = zip(*comb)\n mrecs = mrecords.fromarrays(data, names=names)\n\n # fill the comb\n comb = {k: (v.filled() if hasattr(v, \"filled\") else v) for k, v in comb}\n\n expected = DataFrame(comb, columns=names)\n result = DataFrame(mrecs)\n assert_fr_equal(result, expected)\n\n # specify columns\n expected = DataFrame(comb, columns=names[::-1])\n result = DataFrame(mrecs, columns=names[::-1])\n assert_fr_equal(result, expected)\n\n # specify index\n expected = DataFrame(comb, columns=names, index=[1, 2])\n result = DataFrame(mrecs, index=[1, 2])\n assert_fr_equal(result, expected)\n\n def test_constructor_corner_shape(self):\n df = DataFrame(index=[])\n assert df.values.shape == (0, 0)\n\n @pytest.mark.parametrize(\n \"data, index, columns, dtype, expected\",\n [\n (None, list(range(10)), [\"a\", \"b\"], object, np.object_),\n (None, None, [\"a\", \"b\"], \"int64\", np.dtype(\"int64\")),\n (None, list(range(10)), [\"a\", \"b\"], int, np.dtype(\"float64\")),\n ({}, None, [\"foo\", \"bar\"], None, np.object_),\n ({\"b\": 1}, list(range(10)), list(\"abc\"), int, np.dtype(\"float64\")),\n ],\n )\n def test_constructor_dtype(self, data, index, columns, dtype, expected):\n df = DataFrame(data, index, columns, dtype)\n assert df.values.dtype == expected\n\n def test_constructor_scalar_inference(self):\n data = {\"int\": 1, \"bool\": True, \"float\": 3.0, \"complex\": 4j, \"object\": \"foo\"}\n df = DataFrame(data, index=np.arange(10))\n\n assert df[\"int\"].dtype == np.int64\n assert df[\"bool\"].dtype == np.bool_\n assert df[\"float\"].dtype == np.float64\n assert df[\"complex\"].dtype == np.complex128\n assert df[\"object\"].dtype == np.object_\n\n def test_constructor_arrays_and_scalars(self):\n df = DataFrame({\"a\": np.random.randn(10), \"b\": True})\n exp = DataFrame({\"a\": df[\"a\"].values, \"b\": [True] * 10})\n\n tm.assert_frame_equal(df, exp)\n with pytest.raises(ValueError, match=\"must pass an index\"):\n DataFrame({\"a\": False, \"b\": True})\n\n def test_constructor_DataFrame(self, float_frame):\n df = DataFrame(float_frame)\n tm.assert_frame_equal(df, float_frame)\n\n df_casted = DataFrame(float_frame, dtype=np.int64)\n assert df_casted.values.dtype == np.int64\n\n def test_constructor_more(self, float_frame):\n # used to be in test_matrix.py\n arr = np.random.randn(10)\n dm = DataFrame(arr, columns=[\"A\"], index=np.arange(10))\n assert dm.values.ndim == 2\n\n arr = np.random.randn(0)\n dm = DataFrame(arr)\n assert dm.values.ndim == 2\n assert dm.values.ndim == 2\n\n # no data specified\n dm = DataFrame(columns=[\"A\", \"B\"], index=np.arange(10))\n assert dm.values.shape == (10, 2)\n\n dm = DataFrame(columns=[\"A\", \"B\"])\n assert dm.values.shape == (0, 2)\n\n dm = DataFrame(index=np.arange(10))\n assert dm.values.shape 
== (10, 0)\n\n # can't cast\n mat = np.array([\"foo\", \"bar\"], dtype=object).reshape(2, 1)\n with pytest.raises(ValueError, match=\"cast\"):\n DataFrame(mat, index=[0, 1], columns=[0], dtype=float)\n\n dm = DataFrame(DataFrame(float_frame._series))\n tm.assert_frame_equal(dm, float_frame)\n\n # int cast\n dm = DataFrame(\n {\"A\": np.ones(10, dtype=int), \"B\": np.ones(10, dtype=np.float64)},\n index=np.arange(10),\n )\n\n assert len(dm.columns) == 2\n assert dm.values.dtype == np.float64\n\n def test_constructor_empty_list(self):\n df = DataFrame([], index=[])\n expected = DataFrame(index=[])\n tm.assert_frame_equal(df, expected)\n\n # GH 9939\n df = DataFrame([], columns=[\"A\", \"B\"])\n expected = DataFrame({}, columns=[\"A\", \"B\"])\n tm.assert_frame_equal(df, expected)\n\n # Empty generator: list(empty_gen()) == []\n def empty_gen():\n return\n yield\n\n df = DataFrame(empty_gen(), columns=[\"A\", \"B\"])\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_list_of_lists(self):\n # GH #484\n df = DataFrame(data=[[1, \"a\"], [2, \"b\"]], columns=[\"num\", \"str\"])\n assert is_integer_dtype(df[\"num\"])\n assert df[\"str\"].dtype == np.object_\n\n # GH 4851\n # list of 0-dim ndarrays\n expected = DataFrame({0: np.arange(10)})\n data = [np.array(x) for x in range(10)]\n result = DataFrame(data)\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_sequence_like(self):\n # GH 3783\n # collections.Squence like\n\n class DummyContainer(abc.Sequence):\n def __init__(self, lst):\n self._lst = lst\n\n def __getitem__(self, n):\n return self._lst.__getitem__(n)\n\n def __len__(self, n):\n return self._lst.__len__()\n\n lst_containers = [DummyContainer([1, \"a\"]), DummyContainer([2, \"b\"])]\n columns = [\"num\", \"str\"]\n result = DataFrame(lst_containers, columns=columns)\n expected = DataFrame([[1, \"a\"], [2, \"b\"]], columns=columns)\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n # GH 4297\n # support Array\n import array\n\n result = DataFrame({\"A\": array.array(\"i\", range(10))})\n expected = DataFrame({\"A\": list(range(10))})\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n expected = DataFrame([list(range(10)), list(range(10))])\n result = DataFrame([array.array(\"i\", range(10)), array.array(\"i\", range(10))])\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_range(self):\n # GH26342\n result = DataFrame(range(10))\n expected = DataFrame(list(range(10)))\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_ranges(self):\n result = DataFrame([range(10), range(10)])\n expected = DataFrame([list(range(10)), list(range(10))])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_iterable(self):\n # GH 21987\n class Iter:\n def __iter__(self):\n for i in range(10):\n yield [1, 2, 3]\n\n expected = DataFrame([[1, 2, 3]] * 10)\n result = DataFrame(Iter())\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_iterator(self):\n result = DataFrame(iter(range(10)))\n expected = DataFrame(list(range(10)))\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_iterators(self):\n result = DataFrame([iter(range(10)), iter(range(10))])\n expected = DataFrame([list(range(10)), list(range(10))])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_generator(self):\n # related #2305\n\n gen1 = (i for i in range(10))\n gen2 = (i for i in range(10))\n\n expected = DataFrame([list(range(10)), list(range(10))])\n result = 
DataFrame([gen1, gen2])\n tm.assert_frame_equal(result, expected)\n\n gen = ([i, \"a\"] for i in range(10))\n result = DataFrame(gen)\n expected = DataFrame({0: range(10), 1: \"a\"})\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_list_of_odicts(self):\n data = [\n OrderedDict([[\"a\", 1.5], [\"b\", 3], [\"c\", 4], [\"d\", 6]]),\n OrderedDict([[\"a\", 1.5], [\"b\", 3], [\"d\", 6]]),\n OrderedDict([[\"a\", 1.5], [\"d\", 6]]),\n OrderedDict(),\n OrderedDict([[\"a\", 1.5], [\"b\", 3], [\"c\", 4]]),\n OrderedDict([[\"b\", 3], [\"c\", 4], [\"d\", 6]]),\n ]\n\n result = DataFrame(data)\n expected = DataFrame.from_dict(\n dict(zip(range(len(data)), data)), orient=\"index\"\n )\n tm.assert_frame_equal(result, expected.reindex(result.index))\n\n result = DataFrame([{}])\n expected = DataFrame(index=[0])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_ordered_dict_preserve_order(self):\n # see gh-13304\n expected = DataFrame([[2, 1]], columns=[\"b\", \"a\"])\n\n data = OrderedDict()\n data[\"b\"] = [2]\n data[\"a\"] = [1]\n\n result = DataFrame(data)\n tm.assert_frame_equal(result, expected)\n\n data = OrderedDict()\n data[\"b\"] = 2\n data[\"a\"] = 1\n\n result = DataFrame([data])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_ordered_dict_conflicting_orders(self):\n # the first dict element sets the ordering for the DataFrame,\n # even if there are conflicting orders from subsequent ones\n row_one = OrderedDict()\n row_one[\"b\"] = 2\n row_one[\"a\"] = 1\n\n row_two = OrderedDict()\n row_two[\"a\"] = 1\n row_two[\"b\"] = 2\n\n row_three = {\"b\": 2, \"a\": 1}\n\n expected = DataFrame([[2, 1], [2, 1]], columns=[\"b\", \"a\"])\n result = DataFrame([row_one, row_two])\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=[\"b\", \"a\"])\n result = DataFrame([row_one, row_two, row_three])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_series(self):\n data = [\n OrderedDict([[\"a\", 1.5], [\"b\", 3.0], [\"c\", 4.0]]),\n OrderedDict([[\"a\", 1.5], [\"b\", 3.0], [\"c\", 6.0]]),\n ]\n sdict = OrderedDict(zip([\"x\", \"y\"], data))\n idx = Index([\"a\", \"b\", \"c\"])\n\n # all named\n data2 = [\n Series([1.5, 3, 4], idx, dtype=\"O\", name=\"x\"),\n Series([1.5, 3, 6], idx, name=\"y\"),\n ]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient=\"index\")\n tm.assert_frame_equal(result, expected)\n\n # some unnamed\n data2 = [\n Series([1.5, 3, 4], idx, dtype=\"O\", name=\"x\"),\n Series([1.5, 3, 6], idx),\n ]\n result = DataFrame(data2)\n\n sdict = OrderedDict(zip([\"x\", \"Unnamed 0\"], data))\n expected = DataFrame.from_dict(sdict, orient=\"index\")\n tm.assert_frame_equal(result, expected)\n\n # none named\n data = [\n OrderedDict([[\"a\", 1.5], [\"b\", 3], [\"c\", 4], [\"d\", 6]]),\n OrderedDict([[\"a\", 1.5], [\"b\", 3], [\"d\", 6]]),\n OrderedDict([[\"a\", 1.5], [\"d\", 6]]),\n OrderedDict(),\n OrderedDict([[\"a\", 1.5], [\"b\", 3], [\"c\", 4]]),\n OrderedDict([[\"b\", 3], [\"c\", 4], [\"d\", 6]]),\n ]\n data = [\n create_series_with_explicit_dtype(d, dtype_if_empty=object) for d in data\n ]\n\n result = DataFrame(data)\n sdict = OrderedDict(zip(range(len(data)), data))\n expected = DataFrame.from_dict(sdict, orient=\"index\")\n tm.assert_frame_equal(result, expected.reindex(result.index))\n\n result2 = DataFrame(data, index=np.arange(6))\n tm.assert_frame_equal(result, result2)\n\n result = DataFrame([Series(dtype=object)])\n 
expected = DataFrame(index=[0])\n tm.assert_frame_equal(result, expected)\n\n data = [\n OrderedDict([[\"a\", 1.5], [\"b\", 3.0], [\"c\", 4.0]]),\n OrderedDict([[\"a\", 1.5], [\"b\", 3.0], [\"c\", 6.0]]),\n ]\n sdict = OrderedDict(zip(range(len(data)), data))\n\n idx = Index([\"a\", \"b\", \"c\"])\n data2 = [Series([1.5, 3, 4], idx, dtype=\"O\"), Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient=\"index\")\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_series_aligned_index(self):\n series = [pd.Series(i, index=[\"b\", \"a\", \"c\"], name=str(i)) for i in range(3)]\n result = pd.DataFrame(series)\n expected = pd.DataFrame(\n {\"b\": [0, 1, 2], \"a\": [0, 1, 2], \"c\": [0, 1, 2]},\n columns=[\"b\", \"a\", \"c\"],\n index=[\"0\", \"1\", \"2\"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_derived_dicts(self):\n class CustomDict(dict):\n pass\n\n d = {\"a\": 1.5, \"b\": 3}\n\n data_custom = [CustomDict(d)]\n data = [d]\n\n result_custom = DataFrame(data_custom)\n result = DataFrame(data)\n tm.assert_frame_equal(result, result_custom)\n\n def test_constructor_ragged(self):\n data = {\"A\": np.random.randn(10), \"B\": np.random.randn(8)}\n with pytest.raises(ValueError, match=\"arrays must all be same length\"):\n DataFrame(data)\n\n def test_constructor_scalar(self):\n idx = Index(range(3))\n df = DataFrame({\"a\": 0}, index=idx)\n expected = DataFrame({\"a\": [0, 0, 0]}, index=idx)\n tm.assert_frame_equal(df, expected, check_dtype=False)\n\n def test_constructor_Series_copy_bug(self, float_frame):\n df = DataFrame(float_frame[\"A\"], index=float_frame.index, columns=[\"A\"])\n df.copy()\n\n def test_constructor_mixed_dict_and_Series(self):\n data = {}\n data[\"A\"] = {\"foo\": 1, \"bar\": 2, \"baz\": 3}\n data[\"B\"] = Series([4, 3, 2, 1], index=[\"bar\", \"qux\", \"baz\", \"foo\"])\n\n result = DataFrame(data)\n assert result.index.is_monotonic\n\n # ordering ambiguous, raise exception\n with pytest.raises(ValueError, match=\"ambiguous ordering\"):\n DataFrame({\"A\": [\"a\", \"b\"], \"B\": {\"a\": \"a\", \"b\": \"b\"}})\n\n # this is OK though\n result = DataFrame({\"A\": [\"a\", \"b\"], \"B\": Series([\"a\", \"b\"], index=[\"a\", \"b\"])})\n expected = DataFrame({\"A\": [\"a\", \"b\"], \"B\": [\"a\", \"b\"]}, index=[\"a\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_mixed_type_rows(self):\n # Issue 25075\n data = [[1, 2], (3, 4)]\n result = DataFrame(data)\n expected = DataFrame([[1, 2], [3, 4]])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"tuples,lists\",\n [\n ((), []),\n ((()), []),\n (((), ()), [(), ()]),\n (((), ()), [[], []]),\n (([], []), [[], []]),\n (([1, 2, 3], [4, 5, 6]), [[1, 2, 3], [4, 5, 6]]),\n ],\n )\n def test_constructor_tuple(self, tuples, lists):\n # GH 25691\n result = DataFrame(tuples)\n expected = DataFrame(lists)\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_tuples(self):\n result = DataFrame({\"A\": [(1, 2), (3, 4)]})\n expected = DataFrame({\"A\": Series([(1, 2), (3, 4)])})\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_namedtuples(self):\n # GH11181\n from collections import namedtuple\n\n named_tuple = namedtuple(\"Pandas\", list(\"ab\"))\n tuples = [named_tuple(1, 3), named_tuple(2, 4)]\n expected = DataFrame({\"a\": [1, 2], \"b\": [3, 4]})\n result = DataFrame(tuples)\n tm.assert_frame_equal(result, expected)\n\n # with columns\n 
expected = DataFrame({\"y\": [1, 2], \"z\": [3, 4]})\n result = DataFrame(tuples, columns=[\"y\", \"z\"])\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_list_of_dict_order(self):\n # GH10056\n data = [\n {\"First\": 1, \"Second\": 4, \"Third\": 7, \"Fourth\": 10},\n {\"Second\": 5, \"First\": 2, \"Fourth\": 11, \"Third\": 8},\n {\"Second\": 6, \"First\": 3, \"Fourth\": 12, \"Third\": 9, \"YYY\": 14, \"XXX\": 13},\n ]\n expected = DataFrame(\n {\n \"First\": [1, 2, 3],\n \"Second\": [4, 5, 6],\n \"Third\": [7, 8, 9],\n \"Fourth\": [10, 11, 12],\n \"YYY\": [None, None, 14],\n \"XXX\": [None, None, 13],\n }\n )\n result = DataFrame(data)\n tm.assert_frame_equal(result, expected)\n\n def test_constructor_orient(self, float_string_frame):\n data_dict = float_string_frame.T._series\n recons = DataFrame.from_dict(data_dict, orient=\"index\")\n expected = float_string_frame.reindex(index=recons.index)\n tm.assert_frame_equal(recons, expected)\n\n # dict of sequence\n a = {\"hi\": [32, 3, 3], \"there\": [3, 5, 3]}\n rs = DataFrame.from_dict(a, orient=\"index\")\n xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))\n tm.assert_frame_equal(rs, xp)\n\n def test_constructor_from_ordered_dict(self):\n # GH8425\n a = OrderedDict(\n [\n (\"one\", OrderedDict([(\"col_a\", \"foo1\"), (\"col_b\", \"bar1\")])),\n (\"two\", OrderedDict([(\"col_a\", \"foo2\"), (\"col_b\", \"bar2\")])),\n (\"three\", OrderedDict([(\"col_a\", \"foo3\"), (\"col_b\", \"bar3\")])),\n ]\n )\n expected = DataFrame.from_dict(a, orient=\"columns\").T\n result = DataFrame.from_dict(a, orient=\"index\")\n tm.assert_frame_equal(result, expected)\n\n def test_from_dict_columns_parameter(self):\n # GH 18529\n # Test new columns parameter for from_dict that was added to make\n # from_items(..., orient='index', columns=[...]) easier to replicate\n result = DataFrame.from_dict(\n OrderedDict([(\"A\", [1, 2]), (\"B\", [4, 5])]),\n orient=\"index\",\n columns=[\"one\", \"two\"],\n )\n expected = DataFrame([[1, 2], [4, 5]], index=[\"A\", \"B\"], columns=[\"one\", \"two\"])\n tm.assert_frame_equal(result, expected)\n\n msg = \"cannot use columns parameter with orient='columns'\"\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict(\n dict([(\"A\", [1, 2]), (\"B\", [4, 5])]),\n orient=\"columns\",\n columns=[\"one\", \"two\"],\n )\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict(\n dict([(\"A\", [1, 2]), (\"B\", [4, 5])]), columns=[\"one\", \"two\"]\n )\n\n @pytest.mark.parametrize(\n \"data_dict, keys\",\n [\n ([{(\"a\",): 1}, {(\"a\",): 2}], [(\"a\",)]),\n ([OrderedDict([((\"a\",), 1), ((\"b\",), 2)])], [(\"a\",), (\"b\",)]),\n ([{(\"a\", \"b\"): 1}], [(\"a\", \"b\")]),\n ],\n )\n def test_constructor_from_dict_tuples(self, data_dict, keys):\n # GH 16769\n df = DataFrame.from_dict(data_dict)\n\n result = df.columns\n expected = Index(keys, dtype=\"object\", tupleize_cols=False)\n\n tm.assert_index_equal(result, expected)\n\n def test_constructor_Series_named(self):\n a = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"], name=\"x\")\n df = DataFrame(a)\n assert df.columns[0] == \"x\"\n tm.assert_index_equal(df.index, a.index)\n\n # ndarray like\n arr = np.random.randn(10)\n s = Series(arr, name=\"x\")\n df = DataFrame(s)\n expected = DataFrame(dict(x=s))\n tm.assert_frame_equal(df, expected)\n\n s = Series(arr, index=range(3, 13))\n df = DataFrame(s)\n expected = DataFrame({0: s})\n tm.assert_frame_equal(df, expected)\n\n msg = r\"Shape of passed values is \\(10, 1\\), indices imply \\(10, 2\\)\"\n 
with pytest.raises(ValueError, match=msg):\n DataFrame(s, columns=[1, 2])\n\n # #2234\n a = Series([], name=\"x\", dtype=object)\n df = DataFrame(a)\n assert df.columns[0] == \"x\"\n\n # series with name and w/o\n s1 = Series(arr, name=\"x\")\n df = DataFrame([s1, arr]).T\n expected = DataFrame({\"x\": s1, \"Unnamed 0\": arr}, columns=[\"x\", \"Unnamed 0\"])\n tm.assert_frame_equal(df, expected)\n\n # this is a bit non-intuitive here; the series collapse down to arrays\n df = DataFrame([arr, s1]).T\n expected = DataFrame({1: s1, 0: arr}, columns=[0, 1])\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_Series_named_and_columns(self):\n # GH 9232 validation\n\n s0 = Series(range(5), name=0)\n s1 = Series(range(5), name=1)\n\n # matching name and column gives standard frame\n tm.assert_frame_equal(pd.DataFrame(s0, columns=[0]), s0.to_frame())\n tm.assert_frame_equal(pd.DataFrame(s1, columns=[1]), s1.to_frame())\n\n # non-matching produces empty frame\n assert pd.DataFrame(s0, columns=[1]).empty\n assert pd.DataFrame(s1, columns=[0]).empty\n\n def test_constructor_Series_differently_indexed(self):\n # name\n s1 = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"], name=\"x\")\n\n # no name\n s2 = Series([1, 2, 3], index=[\"a\", \"b\", \"c\"])\n\n other_index = Index([\"a\", \"b\"])\n\n df1 = DataFrame(s1, index=other_index)\n exp1 = DataFrame(s1.reindex(other_index))\n assert df1.columns[0] == \"x\"\n tm.assert_frame_equal(df1, exp1)\n\n df2 = DataFrame(s2, index=other_index)\n exp2 = DataFrame(s2.reindex(other_index))\n assert df2.columns[0] == 0\n tm.assert_index_equal(df2.index, other_index)\n tm.assert_frame_equal(df2, exp2)\n\n def test_constructor_manager_resize(self, float_frame):\n index = list(float_frame.index[:5])\n columns = list(float_frame.columns[:3])\n\n result = DataFrame(float_frame._data, index=index, columns=columns)\n tm.assert_index_equal(result.index, Index(index))\n tm.assert_index_equal(result.columns, Index(columns))\n\n def test_constructor_mix_series_nonseries(self, float_frame):\n df = DataFrame(\n {\"A\": float_frame[\"A\"], \"B\": list(float_frame[\"B\"])}, columns=[\"A\", \"B\"]\n )\n tm.assert_frame_equal(df, float_frame.loc[:, [\"A\", \"B\"]])\n\n msg = \"does not match index length\"\n with pytest.raises(ValueError, match=msg):\n DataFrame({\"A\": float_frame[\"A\"], \"B\": list(float_frame[\"B\"])[:-2]})\n\n def test_constructor_miscast_na_int_dtype(self):\n df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)\n expected = DataFrame([[np.nan, 1], [1, 0]])\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_column_duplicates(self):\n # it works! 
#2079\n df = DataFrame([[8, 5]], columns=[\"a\", \"a\"])\n edf = DataFrame([[8, 5]])\n edf.columns = [\"a\", \"a\"]\n\n tm.assert_frame_equal(df, edf)\n\n idf = DataFrame.from_records([(8, 5)], columns=[\"a\", \"a\"])\n\n tm.assert_frame_equal(idf, edf)\n\n msg = \"If using all scalar values, you must pass an index\"\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_dict(OrderedDict([(\"b\", 8), (\"a\", 5), (\"a\", 6)]))\n\n def test_constructor_empty_with_string_dtype(self):\n # GH 9428\n expected = DataFrame(index=[0, 1], columns=[0, 1], dtype=object)\n\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=str)\n tm.assert_frame_equal(df, expected)\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.str_)\n tm.assert_frame_equal(df, expected)\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=np.unicode_)\n tm.assert_frame_equal(df, expected)\n df = DataFrame(index=[0, 1], columns=[0, 1], dtype=\"U5\")\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_single_value(self):\n # expecting single value upcasting here\n df = DataFrame(0.0, index=[1, 2, 3], columns=[\"a\", \"b\", \"c\"])\n tm.assert_frame_equal(\n df, DataFrame(np.zeros(df.shape).astype(\"float64\"), df.index, df.columns)\n )\n\n df = DataFrame(0, index=[1, 2, 3], columns=[\"a\", \"b\", \"c\"])\n tm.assert_frame_equal(\n df, DataFrame(np.zeros(df.shape).astype(\"int64\"), df.index, df.columns)\n )\n\n df = DataFrame(\"a\", index=[1, 2], columns=[\"a\", \"c\"])\n tm.assert_frame_equal(\n df,\n DataFrame(\n np.array([[\"a\", \"a\"], [\"a\", \"a\"]], dtype=object),\n index=[1, 2],\n columns=[\"a\", \"c\"],\n ),\n )\n\n msg = \"DataFrame constructor not properly called!\"\n with pytest.raises(ValueError, match=msg):\n DataFrame(\"a\", [1, 2])\n with pytest.raises(ValueError, match=msg):\n DataFrame(\"a\", columns=[\"a\", \"c\"])\n\n msg = \"incompatible data and dtype\"\n with pytest.raises(TypeError, match=msg):\n DataFrame(\"a\", [1, 2], [\"a\", \"c\"], float)\n\n def test_constructor_with_datetimes(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype(\"M8[ns]\").name\n objectname = np.dtype(np.object_).name\n\n # single item\n df = DataFrame(\n {\n \"A\": 1,\n \"B\": \"foo\",\n \"C\": \"bar\",\n \"D\": Timestamp(\"20010101\"),\n \"E\": datetime(2001, 1, 2, 0, 0),\n },\n index=np.arange(10),\n )\n result = df.dtypes\n expected = Series(\n [np.dtype(\"int64\")]\n + [np.dtype(objectname)] * 2\n + [np.dtype(datetime64name)] * 2,\n index=list(\"ABCDE\"),\n )\n tm.assert_series_equal(result, expected)\n\n # check with ndarray construction ndim==0 (e.g. 
we are passing a ndim 0\n # ndarray with a dtype specified)\n df = DataFrame(\n {\n \"a\": 1.0,\n \"b\": 2,\n \"c\": \"foo\",\n floatname: np.array(1.0, dtype=floatname),\n intname: np.array(1, dtype=intname),\n },\n index=np.arange(10),\n )\n result = df.dtypes\n expected = Series(\n [np.dtype(\"float64\")]\n + [np.dtype(\"int64\")]\n + [np.dtype(\"object\")]\n + [np.dtype(\"float64\")]\n + [np.dtype(intname)],\n index=[\"a\", \"b\", \"c\", floatname, intname],\n )\n tm.assert_series_equal(result, expected)\n\n # check with ndarray construction ndim>0\n df = DataFrame(\n {\n \"a\": 1.0,\n \"b\": 2,\n \"c\": \"foo\",\n floatname: np.array([1.0] * 10, dtype=floatname),\n intname: np.array([1] * 10, dtype=intname),\n },\n index=np.arange(10),\n )\n result = df.dtypes\n expected = Series(\n [np.dtype(\"float64\")]\n + [np.dtype(\"int64\")]\n + [np.dtype(\"object\")]\n + [np.dtype(\"float64\")]\n + [np.dtype(intname)],\n index=[\"a\", \"b\", \"c\", floatname, intname],\n )\n tm.assert_series_equal(result, expected)\n\n # GH 2809\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n datetime_s = Series(datetimes)\n assert datetime_s.dtype == \"M8[ns]\"\n\n # GH 2810\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n dates = [ts.date() for ts in ind]\n df = DataFrame(datetimes, columns=[\"datetimes\"])\n df[\"dates\"] = dates\n result = df.dtypes\n expected = Series(\n [np.dtype(\"datetime64[ns]\"), np.dtype(\"object\")],\n index=[\"datetimes\", \"dates\"],\n )\n tm.assert_series_equal(result, expected)\n\n # GH 7594\n # don't coerce tz-aware\n import pytz\n\n tz = pytz.timezone(\"US/Eastern\")\n dt = tz.localize(datetime(2012, 1, 1))\n\n df = DataFrame({\"End Date\": dt}, index=[0])\n assert df.iat[0, 0] == dt\n tm.assert_series_equal(\n df.dtypes, Series({\"End Date\": \"datetime64[ns, US/Eastern]\"})\n )\n\n df = DataFrame([{\"End Date\": dt}])\n assert df.iat[0, 0] == dt\n tm.assert_series_equal(\n df.dtypes, Series({\"End Date\": \"datetime64[ns, US/Eastern]\"})\n )\n\n # tz-aware (UTC and other tz's)\n # GH 8411\n dr = date_range(\"20130101\", periods=3)\n df = DataFrame({\"value\": dr})\n assert df.iat[0, 0].tz is None\n dr = date_range(\"20130101\", periods=3, tz=\"UTC\")\n df = DataFrame({\"value\": dr})\n assert str(df.iat[0, 0].tz) == \"UTC\"\n dr = date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n df = DataFrame({\"value\": dr})\n assert str(df.iat[0, 0].tz) == \"US/Eastern\"\n\n # GH 7822\n # preserver an index with a tz on dict construction\n i = date_range(\"1/1/2011\", periods=5, freq=\"10s\", tz=\"US/Eastern\")\n\n expected = DataFrame({\"a\": i.to_series().reset_index(drop=True)})\n df = DataFrame()\n df[\"a\"] = i\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame({\"a\": i})\n tm.assert_frame_equal(df, expected)\n\n # multiples\n i_no_tz = date_range(\"1/1/2011\", periods=5, freq=\"10s\")\n df = DataFrame({\"a\": i, \"b\": i_no_tz})\n expected = DataFrame({\"a\": i.to_series().reset_index(drop=True), \"b\": i_no_tz})\n tm.assert_frame_equal(df, expected)\n\n def test_constructor_datetimes_with_nulls(self):\n # gh-15869\n for arr in [\n np.array([None, None, None, None, datetime.now(), None]),\n np.array([None, None, datetime.now(), None]),\n ]:\n result = DataFrame(arr).dtypes\n expected = Series([np.dtype(\"datetime64[ns]\")])\n tm.assert_series_equal(result, expected)\n\n def test_constructor_for_list_with_dtypes(self):\n # test list of 
lists/ndarrays\n df = DataFrame([np.arange(5) for x in range(5)])\n result = df.dtypes\n expected = Series([np.dtype(\"int64\")] * 5)\n tm.assert_series_equal(result, expected)\n\n df = DataFrame([np.array(np.arange(5), dtype=\"int32\") for x in range(5)])\n result = df.dtypes\n expected = Series([np.dtype(\"int64\")] * 5)\n tm.assert_series_equal(result, expected)\n\n # overflow issue? (we always expecte int64 upcasting here)\n df = DataFrame({\"a\": [2 ** 31, 2 ** 31 + 1]})\n assert df.dtypes.iloc[0] == np.dtype(\"int64\")\n\n # GH #2751 (construction with no index specified), make sure we cast to\n # platform values\n df = DataFrame([1, 2])\n assert df.dtypes.iloc[0] == np.dtype(\"int64\")\n\n df = DataFrame([1.0, 2.0])\n assert df.dtypes.iloc[0] == np.dtype(\"float64\")\n\n df = DataFrame({\"a\": [1, 2]})\n assert df.dtypes.iloc[0] == np.dtype(\"int64\")\n\n df = DataFrame({\"a\": [1.0, 2.0]})\n assert df.dtypes.iloc[0] == np.dtype(\"float64\")\n\n df = DataFrame({\"a\": 1}, index=range(3))\n assert df.dtypes.iloc[0] == np.dtype(\"int64\")\n\n df = DataFrame({\"a\": 1.0}, index=range(3))\n assert df.dtypes.iloc[0] == np.dtype(\"float64\")\n\n # with object list\n df = DataFrame(\n {\n \"a\": [1, 2, 4, 7],\n \"b\": [1.2, 2.3, 5.1, 6.3],\n \"c\": list(\"abcd\"),\n \"d\": [datetime(2000, 1, 1) for i in range(4)],\n \"e\": [1.0, 2, 4.0, 7],\n }\n )\n result = df.dtypes\n expected = Series(\n [\n np.dtype(\"int64\"),\n np.dtype(\"float64\"),\n np.dtype(\"object\"),\n np.dtype(\"datetime64[ns]\"),\n np.dtype(\"float64\"),\n ],\n index=list(\"abcde\"),\n )\n tm.assert_series_equal(result, expected)\n\n def test_constructor_frame_copy(self, float_frame):\n cop = DataFrame(float_frame, copy=True)\n cop[\"A\"] = 5\n assert (cop[\"A\"] == 5).all()\n assert not (float_frame[\"A\"] == 5).all()\n\n def test_constructor_ndarray_copy(self, float_frame):\n df = DataFrame(float_frame.values)\n\n float_frame.values[5] = 5\n assert (df.values[5] == 5).all()\n\n df = DataFrame(float_frame.values, copy=True)\n float_frame.values[6] = 6\n assert not (df.values[6] == 6).all()\n\n def test_constructor_series_copy(self, float_frame):\n series = float_frame._series\n\n df = DataFrame({\"A\": series[\"A\"]})\n df[\"A\"][:] = 5\n\n assert not (series[\"A\"] == 5).all()\n\n def test_constructor_with_nas(self):\n # GH 5016\n # na's in indices\n\n def check(df):\n for i in range(len(df.columns)):\n df.iloc[:, i]\n\n indexer = np.arange(len(df.columns))[isna(df.columns)]\n\n # No NaN found -> error\n if len(indexer) == 0:\n msg = (\n \"cannot do label indexing on\"\n r\" <class 'pandas\\.core\\.indexes\\.range\\.RangeIndex'>\"\n r\" with these indexers \\[nan\\] of <class 'float'>\"\n )\n with pytest.raises(TypeError, match=msg):\n df.loc[:, np.nan]\n # single nan should result in Series\n elif len(indexer) == 1:\n tm.assert_series_equal(df.iloc[:, indexer[0]], df.loc[:, np.nan])\n # multiple nans should result in DataFrame\n else:\n tm.assert_frame_equal(df.iloc[:, indexer], df.loc[:, np.nan])\n\n df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])\n check(df)\n\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1.1, 2.2, np.nan])\n check(df)\n\n df = DataFrame([[0, 1, 2, 3], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan])\n check(df)\n\n df = DataFrame(\n [[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1.1, 2.2, np.nan]\n )\n check(df)\n\n # GH 21428 (non-unique columns)\n df = DataFrame([[0.0, 1, 2, 3.0], [4, 5, 6, 7]], columns=[np.nan, 1, 2, 2])\n check(df)\n\n def 
test_constructor_lists_to_object_dtype(self):\n # from #1074\n d = DataFrame({\"a\": [np.nan, False]})\n assert d[\"a\"].dtype == np.object_\n assert not d[\"a\"][1]\n\n def test_constructor_categorical(self):\n\n # GH8626\n\n # dict creation\n df = DataFrame({\"A\": list(\"abc\")}, dtype=\"category\")\n expected = Series(list(\"abc\"), dtype=\"category\", name=\"A\")\n tm.assert_series_equal(df[\"A\"], expected)\n\n # to_frame\n s = Series(list(\"abc\"), dtype=\"category\")\n result = s.to_frame()\n expected = Series(list(\"abc\"), dtype=\"category\", name=0)\n tm.assert_series_equal(result[0], expected)\n result = s.to_frame(name=\"foo\")\n expected = Series(list(\"abc\"), dtype=\"category\", name=\"foo\")\n tm.assert_series_equal(result[\"foo\"], expected)\n\n # list-like creation\n df = DataFrame(list(\"abc\"), dtype=\"category\")\n expected = Series(list(\"abc\"), dtype=\"category\", name=0)\n tm.assert_series_equal(df[0], expected)\n\n # ndim != 1\n df = DataFrame([Categorical(list(\"abc\"))])\n expected = DataFrame({0: Series(list(\"abc\"), dtype=\"category\")})\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame([Categorical(list(\"abc\")), Categorical(list(\"abd\"))])\n expected = DataFrame(\n {\n 0: Series(list(\"abc\"), dtype=\"category\"),\n 1: Series(list(\"abd\"), dtype=\"category\"),\n },\n columns=[0, 1],\n )\n tm.assert_frame_equal(df, expected)\n\n # mixed\n df = DataFrame([Categorical(list(\"abc\")), list(\"def\")])\n expected = DataFrame(\n {0: Series(list(\"abc\"), dtype=\"category\"), 1: list(\"def\")}, columns=[0, 1]\n )\n tm.assert_frame_equal(df, expected)\n\n # invalid (shape)\n msg = r\"Shape of passed values is \\(6, 2\\), indices imply \\(3, 2\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame([Categorical(list(\"abc\")), Categorical(list(\"abdefg\"))])\n\n # ndim > 1\n msg = \"> 1 ndim Categorical are not supported at this time\"\n with pytest.raises(NotImplementedError, match=msg):\n Categorical(np.array([list(\"abcd\")]))\n\n def test_constructor_categorical_series(self):\n\n items = [1, 2, 3, 1]\n exp = Series(items).astype(\"category\")\n res = Series(items, dtype=\"category\")\n tm.assert_series_equal(res, exp)\n\n items = [\"a\", \"b\", \"c\", \"a\"]\n exp = Series(items).astype(\"category\")\n res = Series(items, dtype=\"category\")\n tm.assert_series_equal(res, exp)\n\n # insert into frame with different index\n # GH 8076\n index = date_range(\"20000101\", periods=3)\n expected = Series(\n Categorical(values=[np.nan, np.nan, np.nan], categories=[\"a\", \"b\", \"c\"])\n )\n expected.index = index\n\n expected = DataFrame({\"x\": expected})\n df = DataFrame({\"x\": Series([\"a\", \"b\", \"c\"], dtype=\"category\")}, index=index)\n tm.assert_frame_equal(df, expected)\n\n def test_from_records_to_records(self):\n # from numpy documentation\n arr = np.zeros((2,), dtype=(\"i4,f4,a10\"))\n arr[:] = [(1, 2.0, \"Hello\"), (2, 3.0, \"World\")]\n\n # TODO(wesm): unused\n frame = DataFrame.from_records(arr) # noqa\n\n index = pd.Index(np.arange(len(arr))[::-1])\n indexed_frame = DataFrame.from_records(arr, index=index)\n tm.assert_index_equal(indexed_frame.index, index)\n\n # without names, it should go to last ditch\n arr2 = np.zeros((2, 3))\n tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))\n\n # wrong length\n msg = r\"Shape of passed values is \\(2, 3\\), indices imply \\(1, 3\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_records(arr, index=index[:-1])\n\n indexed_frame = DataFrame.from_records(arr, 
index=\"f1\")\n\n # what to do?\n records = indexed_frame.to_records()\n assert len(records.dtype.names) == 3\n\n records = indexed_frame.to_records(index=False)\n assert len(records.dtype.names) == 2\n assert \"index\" not in records.dtype.names\n\n def test_from_records_nones(self):\n tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]\n\n df = DataFrame.from_records(tuples, columns=[\"a\", \"b\", \"c\", \"d\"])\n assert np.isnan(df[\"c\"][0])\n\n def test_from_records_iterator(self):\n arr = np.array(\n [(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],\n dtype=[\n (\"x\", np.float64),\n (\"u\", np.float32),\n (\"y\", np.int64),\n (\"z\", np.int32),\n ],\n )\n df = DataFrame.from_records(iter(arr), nrows=2)\n xp = DataFrame(\n {\n \"x\": np.array([1.0, 3.0], dtype=np.float64),\n \"u\": np.array([1.0, 3.0], dtype=np.float32),\n \"y\": np.array([2, 4], dtype=np.int64),\n \"z\": np.array([2, 4], dtype=np.int32),\n }\n )\n tm.assert_frame_equal(df.reindex_like(xp), xp)\n\n # no dtypes specified here, so just compare with the default\n arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]\n df = DataFrame.from_records(iter(arr), columns=[\"x\", \"y\"], nrows=2)\n tm.assert_frame_equal(df, xp.reindex(columns=[\"x\", \"y\"]), check_dtype=False)\n\n def test_from_records_tuples_generator(self):\n def tuple_generator(length):\n for i in range(length):\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n yield (i, letters[i % len(letters)], i / length)\n\n columns_names = [\"Integer\", \"String\", \"Float\"]\n columns = [\n [i[j] for i in tuple_generator(10)] for j in range(len(columns_names))\n ]\n data = {\"Integer\": columns[0], \"String\": columns[1], \"Float\": columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = tuple_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_lists_generator(self):\n def list_generator(length):\n for i in range(length):\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n yield [i, letters[i % len(letters)], i / length]\n\n columns_names = [\"Integer\", \"String\", \"Float\"]\n columns = [\n [i[j] for i in list_generator(10)] for j in range(len(columns_names))\n ]\n data = {\"Integer\": columns[0], \"String\": columns[1], \"Float\": columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = list_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_columns_not_modified(self):\n tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]\n\n columns = [\"a\", \"b\", \"c\"]\n original_columns = list(columns)\n\n df = DataFrame.from_records(tuples, columns=columns, index=\"a\") # noqa\n\n assert columns == original_columns\n\n def test_from_records_decimal(self):\n from decimal import Decimal\n\n tuples = [(Decimal(\"1.5\"),), (Decimal(\"2.5\"),), (None,)]\n\n df = DataFrame.from_records(tuples, columns=[\"a\"])\n assert df[\"a\"].dtype == object\n\n df = DataFrame.from_records(tuples, columns=[\"a\"], coerce_float=True)\n assert df[\"a\"].dtype == np.float64\n assert np.isnan(df[\"a\"].values[-1])\n\n def test_from_records_duplicates(self):\n result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=[\"a\", \"b\", \"a\"])\n\n expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=[\"a\", \"b\", \"a\"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_set_index_name(self):\n def create_dict(order_id):\n return 
{\n \"order_id\": order_id,\n \"quantity\": np.random.randint(1, 10),\n \"price\": np.random.randint(1, 10),\n }\n\n documents = [create_dict(i) for i in range(10)]\n # demo missing data\n documents.append({\"order_id\": 10, \"quantity\": 5})\n\n result = DataFrame.from_records(documents, index=\"order_id\")\n assert result.index.name == \"order_id\"\n\n # MultiIndex\n result = DataFrame.from_records(documents, index=[\"order_id\", \"quantity\"])\n assert result.index.names == (\"order_id\", \"quantity\")\n\n def test_from_records_misc_brokenness(self):\n # #2179\n\n data = {1: [\"foo\"], 2: [\"bar\"]}\n\n result = DataFrame.from_records(data, columns=[\"a\", \"b\"])\n exp = DataFrame(data, columns=[\"a\", \"b\"])\n tm.assert_frame_equal(result, exp)\n\n # overlap in index/index_names\n\n data = {\"a\": [1, 2, 3], \"b\": [4, 5, 6]}\n\n result = DataFrame.from_records(data, index=[\"a\", \"b\", \"c\"])\n exp = DataFrame(data, index=[\"a\", \"b\", \"c\"])\n tm.assert_frame_equal(result, exp)\n\n # GH 2623\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), \"hi\"]) # test col upconverts to obj\n df2_obj = DataFrame.from_records(rows, columns=[\"date\", \"test\"])\n result = df2_obj.dtypes\n expected = Series(\n [np.dtype(\"datetime64[ns]\"), np.dtype(\"object\")], index=[\"date\", \"test\"]\n )\n tm.assert_series_equal(result, expected)\n\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 1])\n df2_obj = DataFrame.from_records(rows, columns=[\"date\", \"test\"])\n result = df2_obj.dtypes\n expected = Series(\n [np.dtype(\"datetime64[ns]\"), np.dtype(\"int64\")], index=[\"date\", \"test\"]\n )\n tm.assert_series_equal(result, expected)\n\n def test_from_records_empty(self):\n # 3562\n result = DataFrame.from_records([], columns=[\"a\", \"b\", \"c\"])\n expected = DataFrame(columns=[\"a\", \"b\", \"c\"])\n tm.assert_frame_equal(result, expected)\n\n result = DataFrame.from_records([], columns=[\"a\", \"b\", \"b\"])\n expected = DataFrame(columns=[\"a\", \"b\", \"b\"])\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_empty_with_nonempty_fields_gh3682(self):\n a = np.array([(1, 2)], dtype=[(\"id\", np.int64), (\"value\", np.int64)])\n df = DataFrame.from_records(a, index=\"id\")\n tm.assert_index_equal(df.index, Index([1], name=\"id\"))\n assert df.index.name == \"id\"\n tm.assert_index_equal(df.columns, Index([\"value\"]))\n\n b = np.array([], dtype=[(\"id\", np.int64), (\"value\", np.int64)])\n df = DataFrame.from_records(b, index=\"id\")\n tm.assert_index_equal(df.index, Index([], name=\"id\"))\n assert df.index.name == \"id\"\n\n def test_from_records_with_datetimes(self):\n\n # this may fail on certain platforms because of a numpy issue\n # related GH6140\n if not is_platform_little_endian():\n pytest.skip(\"known failure of test on non-little endian\")\n\n # construction with a null in a recarray\n # GH 6140\n expected = DataFrame({\"EXPIRY\": [datetime(2005, 3, 1, 0, 0), None]})\n\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [(\"EXPIRY\", \"<M8[ns]\")]\n\n try:\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n except (ValueError):\n pytest.skip(\"known failure of numpy rec array creation\")\n\n result = DataFrame.from_records(recarray)\n tm.assert_frame_equal(result, expected)\n\n # coercion should work too\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [(\"EXPIRY\", \"<M8[m]\")]\n recarray = np.core.records.fromarrays(arrdata, 
dtype=dtypes)\n result = DataFrame.from_records(recarray)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_sequencelike(self):\n df = DataFrame(\n {\n \"A\": np.array(np.random.randn(6), dtype=np.float64),\n \"A1\": np.array(np.random.randn(6), dtype=np.float64),\n \"B\": np.array(np.arange(6), dtype=np.int64),\n \"C\": [\"foo\"] * 6,\n \"D\": np.array([True, False] * 3, dtype=bool),\n \"E\": np.array(np.random.randn(6), dtype=np.float32),\n \"E1\": np.array(np.random.randn(6), dtype=np.float32),\n \"F\": np.array(np.arange(6), dtype=np.int32),\n }\n )\n\n # this is actually tricky to create the recordlike arrays and\n # have the dtypes be intact\n blocks = df._to_dict_of_blocks()\n tuples = []\n columns = []\n dtypes = []\n for dtype, b in blocks.items():\n columns.extend(b.columns)\n dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])\n for i in range(len(df.index)):\n tup = []\n for _, b in blocks.items():\n tup.extend(b.iloc[i].values)\n tuples.append(tuple(tup))\n\n recarray = np.array(tuples, dtype=dtypes).view(np.recarray)\n recarray2 = df.to_records()\n lists = [list(x) for x in tuples]\n\n # tuples (lose the dtype info)\n result = DataFrame.from_records(tuples, columns=columns).reindex(\n columns=df.columns\n )\n\n # created recarray and with to_records recarray (have dtype info)\n result2 = DataFrame.from_records(recarray, columns=columns).reindex(\n columns=df.columns\n )\n result3 = DataFrame.from_records(recarray2, columns=columns).reindex(\n columns=df.columns\n )\n\n # list of tupels (no dtype info)\n result4 = DataFrame.from_records(lists, columns=columns).reindex(\n columns=df.columns\n )\n\n tm.assert_frame_equal(result, df, check_dtype=False)\n tm.assert_frame_equal(result2, df)\n tm.assert_frame_equal(result3, df)\n tm.assert_frame_equal(result4, df, check_dtype=False)\n\n # tuples is in the order of the columns\n result = DataFrame.from_records(tuples)\n tm.assert_index_equal(result.columns, pd.RangeIndex(8))\n\n # test exclude parameter & we are casting the results here (as we don't\n # have dtype info to recover)\n columns_to_test = [columns.index(\"C\"), columns.index(\"E1\")]\n\n exclude = list(set(range(8)) - set(columns_to_test))\n result = DataFrame.from_records(tuples, exclude=exclude)\n result.columns = [columns[i] for i in sorted(columns_to_test)]\n tm.assert_series_equal(result[\"C\"], df[\"C\"])\n tm.assert_series_equal(result[\"E1\"], df[\"E1\"].astype(\"float64\"))\n\n # empty case\n result = DataFrame.from_records([], columns=[\"foo\", \"bar\", \"baz\"])\n assert len(result) == 0\n tm.assert_index_equal(result.columns, pd.Index([\"foo\", \"bar\", \"baz\"]))\n\n result = DataFrame.from_records([])\n assert len(result) == 0\n assert len(result.columns) == 0\n\n def test_from_records_dictlike(self):\n\n # test the dict methods\n df = DataFrame(\n {\n \"A\": np.array(np.random.randn(6), dtype=np.float64),\n \"A1\": np.array(np.random.randn(6), dtype=np.float64),\n \"B\": np.array(np.arange(6), dtype=np.int64),\n \"C\": [\"foo\"] * 6,\n \"D\": np.array([True, False] * 3, dtype=bool),\n \"E\": np.array(np.random.randn(6), dtype=np.float32),\n \"E1\": np.array(np.random.randn(6), dtype=np.float32),\n \"F\": np.array(np.arange(6), dtype=np.int32),\n }\n )\n\n # columns is in a different order here than the actual items iterated\n # from the dict\n blocks = df._to_dict_of_blocks()\n columns = []\n for dtype, b in blocks.items():\n columns.extend(b.columns)\n\n asdict = {x: y for x, y in df.items()}\n asdict2 = {x: y.values for 
x, y in df.items()}\n\n # dict of series & dict of ndarrays (have dtype info)\n results = []\n results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))\n results.append(\n DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)\n )\n results.append(\n DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)\n )\n\n for r in results:\n tm.assert_frame_equal(r, df)\n\n def test_from_records_with_index_data(self):\n df = DataFrame(np.random.randn(10, 3), columns=[\"A\", \"B\", \"C\"])\n\n data = np.random.randn(10)\n df1 = DataFrame.from_records(df, index=data)\n tm.assert_index_equal(df1.index, Index(data))\n\n def test_from_records_bad_index_column(self):\n df = DataFrame(np.random.randn(10, 3), columns=[\"A\", \"B\", \"C\"])\n\n # should pass\n df1 = DataFrame.from_records(df, index=[\"C\"])\n tm.assert_index_equal(df1.index, Index(df.C))\n\n df1 = DataFrame.from_records(df, index=\"C\")\n tm.assert_index_equal(df1.index, Index(df.C))\n\n # should fail\n msg = r\"Shape of passed values is \\(10, 3\\), indices imply \\(1, 3\\)\"\n with pytest.raises(ValueError, match=msg):\n DataFrame.from_records(df, index=[2])\n with pytest.raises(KeyError, match=r\"^2$\"):\n DataFrame.from_records(df, index=2)\n\n def test_from_records_non_tuple(self):\n class Record:\n def __init__(self, *args):\n self.args = args\n\n def __getitem__(self, i):\n return self.args[i]\n\n def __iter__(self):\n return iter(self.args)\n\n recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]\n tups = [tuple(rec) for rec in recs]\n\n result = DataFrame.from_records(recs)\n expected = DataFrame.from_records(tups)\n tm.assert_frame_equal(result, expected)\n\n def test_from_records_len0_with_columns(self):\n # #2633\n result = DataFrame.from_records([], index=\"foo\", columns=[\"foo\", \"bar\"])\n expected = Index([\"bar\"])\n\n assert len(result) == 0\n assert result.index.name == \"foo\"\n tm.assert_index_equal(result.columns, expected)\n\n def test_from_records_series_list_dict(self):\n # GH27358\n expected = DataFrame([[{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}]]).T\n data = Series([[{\"a\": 1, \"b\": 2}], [{\"a\": 3, \"b\": 4}]])\n result = DataFrame.from_records(data)\n tm.assert_frame_equal(result, expected)\n\n def test_to_frame_with_falsey_names(self):\n # GH 16114\n result = Series(name=0, dtype=object).to_frame().dtypes\n expected = Series({0: object})\n tm.assert_series_equal(result, expected)\n\n result = DataFrame(Series(name=0, dtype=object)).dtypes\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [None, \"uint8\", \"category\"])\n def test_constructor_range_dtype(self, dtype):\n expected = DataFrame({\"A\": [0, 1, 2, 3, 4]}, dtype=dtype or \"int64\")\n\n # GH 26342\n result = DataFrame(range(5), columns=[\"A\"], dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n # GH 16804\n result = DataFrame({\"A\": range(5)}, dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n def test_frame_from_list_subclass(self):\n # GH21226\n class List(list):\n pass\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6]])\n result = DataFrame(List([List([1, 2, 3]), List([4, 5, 6])]))\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDataFrameConstructorWithDatetimeTZ:\n def test_from_dict(self):\n\n # 8260\n # support datetime64 with tz\n\n idx = Index(date_range(\"20130101\", periods=3, tz=\"US/Eastern\"), name=\"foo\")\n dr = date_range(\"20130110\", periods=3)\n\n # construction\n df = DataFrame({\"A\": idx, \"B\": dr})\n 
assert df[\"A\"].dtype, \"M8[ns, US/Eastern\"\n assert df[\"A\"].name == \"A\"\n tm.assert_series_equal(df[\"A\"], Series(idx, name=\"A\"))\n tm.assert_series_equal(df[\"B\"], Series(dr, name=\"B\"))\n\n def test_from_index(self):\n\n # from index\n idx2 = date_range(\"20130101\", periods=3, tz=\"US/Eastern\", name=\"foo\")\n df2 = DataFrame(idx2)\n tm.assert_series_equal(df2[\"foo\"], Series(idx2, name=\"foo\"))\n df2 = DataFrame(Series(idx2))\n tm.assert_series_equal(df2[\"foo\"], Series(idx2, name=\"foo\"))\n\n idx2 = date_range(\"20130101\", periods=3, tz=\"US/Eastern\")\n df2 = DataFrame(idx2)\n tm.assert_series_equal(df2[0], Series(idx2, name=0))\n df2 = DataFrame(Series(idx2))\n tm.assert_series_equal(df2[0], Series(idx2, name=0))\n\n def test_frame_dict_constructor_datetime64_1680(self):\n dr = date_range(\"1/1/2012\", periods=10)\n s = Series(dr, index=dr)\n\n # it works!\n DataFrame({\"a\": \"foo\", \"b\": s}, index=dr)\n DataFrame({\"a\": \"foo\", \"b\": s.values}, index=dr)\n\n def test_frame_datetime64_mixed_index_ctor_1681(self):\n dr = date_range(\"2011/1/1\", \"2012/1/1\", freq=\"W-FRI\")\n ts = Series(dr)\n\n # it works!\n d = DataFrame({\"A\": \"foo\", \"B\": ts}, index=dr)\n assert d[\"B\"].isna().all()\n\n def test_frame_timeseries_to_records(self):\n index = date_range(\"1/1/2000\", periods=10)\n df = DataFrame(np.random.randn(10, 3), index=index, columns=[\"a\", \"b\", \"c\"])\n\n result = df.to_records()\n result[\"index\"].dtype == \"M8[ns]\"\n\n result = df.to_records(index=False)\n\n def test_frame_timeseries_column(self):\n # GH19157\n dr = date_range(start=\"20130101T10:00:00\", periods=3, freq=\"T\", tz=\"US/Eastern\")\n result = DataFrame(dr, columns=[\"timestamps\"])\n expected = DataFrame(\n {\n \"timestamps\": [\n Timestamp(\"20130101T10:00:00\", tz=\"US/Eastern\"),\n Timestamp(\"20130101T10:01:00\", tz=\"US/Eastern\"),\n Timestamp(\"20130101T10:02:00\", tz=\"US/Eastern\"),\n ]\n }\n )\n tm.assert_frame_equal(result, expected)\n\n def test_nested_dict_construction(self):\n # GH22227\n columns = [\"Nevada\", \"Ohio\"]\n pop = {\n \"Nevada\": {2001: 2.4, 2002: 2.9},\n \"Ohio\": {2000: 1.5, 2001: 1.7, 2002: 3.6},\n }\n result = pd.DataFrame(pop, index=[2001, 2002, 2003], columns=columns)\n expected = pd.DataFrame(\n [(2.4, 1.7), (2.9, 3.6), (np.nan, np.nan)],\n columns=columns,\n index=pd.Index([2001, 2002, 2003]),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_from_tzaware_object_array(self):\n # GH#26825 2D object array of tzaware timestamps should not raise\n dti = pd.date_range(\"2016-04-05 04:30\", periods=3, tz=\"UTC\")\n data = dti._data.astype(object).reshape(1, -1)\n df = pd.DataFrame(data)\n assert df.shape == (1, 3)\n assert (df.dtypes == dti.dtype).all()\n assert (df == dti).all().all()\n\n def test_from_tzaware_mixed_object_array(self):\n # GH#26825\n arr = np.array(\n [\n [\n Timestamp(\"2013-01-01 00:00:00\"),\n Timestamp(\"2013-01-02 00:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00-0500\", tz=\"US/Eastern\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00-0500\", tz=\"US/Eastern\"),\n ],\n [\n Timestamp(\"2013-01-01 00:00:00+0100\", tz=\"CET\"),\n pd.NaT,\n Timestamp(\"2013-01-03 00:00:00+0100\", tz=\"CET\"),\n ],\n ],\n dtype=object,\n ).T\n res = DataFrame(arr, columns=[\"A\", \"B\", \"C\"])\n\n expected_dtypes = [\n \"datetime64[ns]\",\n \"datetime64[ns, US/Eastern]\",\n \"datetime64[ns, CET]\",\n ]\n assert (res.dtypes == expected_dtypes).all()\n"
] |
[
[
"pandas.PeriodIndex",
"pandas.Series",
"pandas.RangeIndex",
"numpy.asarray",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.concatenate",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.core.records.fromarrays",
"pandas.DataFrame.from_records",
"pandas.isna",
"numpy.random.randint",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.Int64Index",
"numpy.ma.zeros",
"numpy.zeros",
"numpy.ma.ones",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.construction.create_series_with_explicit_dtype",
"pandas.MultiIndex",
"numpy.isnan",
"pandas.Categorical",
"pandas.util.testing.makeStringIndex",
"pandas.Timedelta",
"pandas.util.testing.getMixedTypeDict",
"numpy.timedelta64",
"pandas.compat.is_platform_little_endian",
"numpy.random.rand",
"pandas.DataFrame.from_dict",
"pandas.date_range",
"numpy.ma.masked_all",
"numpy.array",
"numpy.ma.mrecords.fromarrays",
"pandas.util.testing.makeTimeSeries",
"pandas.period_range",
"numpy.ma.copy",
"numpy.ones",
"numpy.ma.masked_array",
"pandas.Timestamp",
"numpy.empty"
]
] |
lueshen/lingshiqing
|
[
"9fe930ccf00c9a80a054a9db761443057af791bf"
] |
[
"Reports/tex/biTreePriceSimulation.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 9 13:46:08 2019\n\n@author: Leheng Chen\n\"\"\"\n\nfrom binomialTreePricer import asianOptionBinomialTree\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\n\nuly_names = ['Crude Oil WTI', 'Ethanol', 'Gold', 'Silver', 'Natural Gas']\nuly_init = df_uly[uly_names].tail(1)\ndf_opt['bdays'] = 1 + np.busday_count(df_opt['Start Date'].values.astype('datetime64[D]'), df_opt['Maturity Date'].values.astype('datetime64[D]'))\n\ndf_uly_vol = df_uly[uly_names].std(skipna=True)\n\noneOverRho = 3\ndf_vols = pd.DataFrame([[0.3, 0.01, 0.4, 0.1, 0.001]], columns = uly_names)\ndf_units = pd.DataFrame([[0.01, 0.0001, 1, 0.001, 0.01]], columns = uly_names)\nbdays_year = 252\n \n# =============================================================================\n# Define risk free rate, reference to US treasury yield curve as of 20190322\n# https://www.treasury.gov/resource-center/data-chart-center/interest-rates/pages/TextView.aspx?data=yieldYear&year=2019\n# 1m, 2m, 3m, 6m, 1y, 2y, 3y, 5y, 7y, 10y, 20y, 30y\n# =============================================================================\n# Define risk free rate according to US\nyieldCurveDict = {\n '2019-04-22': 2.49,\n '2019-05-22': 2.48,\n '2019-06-22': 2.46,\n '2019-09-22': 2.48,\n '2020-03-22': 2.45,\n '2021-03-22': 2.31,\n '2022-03-22': 2.24,\n '2024-03-22': 2.24,\n '2026-03-22': 2.34,\n '2029-03-22': 2.44,\n '2039-03-22': 2.69,\n '2049-03-22': 2.88\n }\n\n# Derive forward rates from US treasury yield curve\ncurvePoints = ['2019-03-22'] + list(yieldCurveDict.keys())\n\nforwardCurveDict = {}\nfor i in range(len(yieldCurveDict)):\n datePoint1 = curvePoints[i]\n datePoint2 = curvePoints[i + 1]\n if (datePoint1 == curvePoints[0]):\n forwardCurveDict[datePoint2] = yieldCurveDict[datePoint2]\n else:\n yieldAtDate1 = yieldCurveDict[datePoint1]\n yieldAtDate2 = yieldCurveDict[datePoint2]\n busDateDiff1 = np.busday_count(curvePoints[0], datePoint1)\n busDateDiff2 = np.busday_count(curvePoints[0], datePoint2)\n forwardCurveDict[datePoint2] = float((yieldAtDate2 * busDateDiff2 - yieldAtDate1 * busDateDiff1) / (busDateDiff2 - busDateDiff1))\n\n# Function to get risk free rate given a date (datetime.date object)\ndef getRiskFreeRate(inputDate):\n input_date = inputDate.date()\n for i in range(len(forwardCurveDict)):\n datePoint1 = datetime.strptime(curvePoints[i],'%Y-%m-%d').date()\n datePoint2 = datetime.strptime(curvePoints[i + 1],'%Y-%m-%d').date()\n if (input_date >= datePoint1 and input_date < datePoint2):\n return forwardCurveDict[curvePoints[i + 1]]\n return 0\n\n\nfor row in df_opt.index:\n # Retrieve the name of the underlying\n tmp_uly = df_opt['Underlying'][row][:-8]\n tmp_strike = df_opt['Strike'][row]\n tmp_maturity = df_opt['Maturity Date'][row]\n tmp_steps = df_opt['bdays'][row]\n if tmp_steps > bdays_year:\n tmp_steps = bdays_year\n tmp_init = uly_init[tmp_uly][0]\n tmp_time_period = 1 / bdays_year\n tmp_vol = df_uly_vol[tmp_uly]\n tmp_ir = get_interest_rate(tmp_steps)\n tmp_rates = [getRiskFreeRate(tmp_maturity - timedelta(d)) for d in range(tmp_steps)]\n \n tmp_call = df_opt['Call'][row]\n tmp_unit = df_units[tmp_uly][0]\n \n pricer = asianOptionBinomialTree(tmp_steps, tmp_vol, tmp_time_period, oneOverRho, tmp_rates)\n sim = pricer.getOptionPrice(tmp_init, tmp_strike * tmp_unit)\n print('undeylying: %s; bdays: %d, strile: %6.3f, init: %6.3f --> simulate: %6.3f; actual call: %6.3f' \\\n % (tmp_uly, tmp_steps, tmp_strike* tmp_unit, tmp_init, sim, tmp_call))\n"
] |
[
[
"numpy.busday_count",
"pandas.DataFrame"
]
] |
javoweb/cvat
|
[
"684544d2a06c192e7155f655897e6360b4a3be37"
] |
[
"datumaro/datumaro/plugins/datumaro_format/converter.py"
] |
[
"\n# Copyright (C) 2019 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\n# pylint: disable=no-self-use\n\nimport json\nimport numpy as np\nimport os\nimport os.path as osp\n\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.extractor import (\n DEFAULT_SUBSET_NAME, Annotation,\n Label, Mask, RleMask, Points, Polygon, PolyLine, Bbox, Caption,\n LabelCategories, MaskCategories, PointsCategories\n)\nfrom datumaro.util import cast\nfrom datumaro.util.image import save_image\nimport pycocotools.mask as mask_utils\nfrom datumaro.components.cli_plugin import CliPlugin\n\nfrom .format import DatumaroPath\n\n\nclass _SubsetWriter:\n def __init__(self, name, context):\n self._name = name\n self._context = context\n\n self._data = {\n 'info': {},\n 'categories': {},\n 'items': [],\n }\n\n @property\n def categories(self):\n return self._data['categories']\n\n @property\n def items(self):\n return self._data['items']\n\n def write_item(self, item):\n annotations = []\n item_desc = {\n 'id': item.id,\n 'annotations': annotations,\n }\n if item.path:\n item_desc['path'] = item.path\n if item.has_image:\n path = item.image.path\n if self._context._save_images:\n path = self._context._save_image(item)\n\n item_desc['image'] = {\n 'size': item.image.size,\n 'path': path,\n }\n self.items.append(item_desc)\n\n for ann in item.annotations:\n if isinstance(ann, Label):\n converted_ann = self._convert_label_object(ann)\n elif isinstance(ann, Mask):\n converted_ann = self._convert_mask_object(ann)\n elif isinstance(ann, Points):\n converted_ann = self._convert_points_object(ann)\n elif isinstance(ann, PolyLine):\n converted_ann = self._convert_polyline_object(ann)\n elif isinstance(ann, Polygon):\n converted_ann = self._convert_polygon_object(ann)\n elif isinstance(ann, Bbox):\n converted_ann = self._convert_bbox_object(ann)\n elif isinstance(ann, Caption):\n converted_ann = self._convert_caption_object(ann)\n else:\n raise NotImplementedError()\n annotations.append(converted_ann)\n\n def write_categories(self, categories):\n for ann_type, desc in categories.items():\n if isinstance(desc, LabelCategories):\n converted_desc = self._convert_label_categories(desc)\n elif isinstance(desc, MaskCategories):\n converted_desc = self._convert_mask_categories(desc)\n elif isinstance(desc, PointsCategories):\n converted_desc = self._convert_points_categories(desc)\n else:\n raise NotImplementedError()\n self.categories[ann_type.name] = converted_desc\n\n def write(self, save_dir):\n with open(osp.join(save_dir, '%s.json' % (self._name)), 'w') as f:\n json.dump(self._data, f)\n\n def _convert_annotation(self, obj):\n assert isinstance(obj, Annotation)\n\n ann_json = {\n 'id': cast(obj.id, int),\n 'type': cast(obj.type.name, str),\n 'attributes': obj.attributes,\n 'group': cast(obj.group, int, 0),\n }\n return ann_json\n\n def _convert_label_object(self, obj):\n converted = self._convert_annotation(obj)\n\n converted.update({\n 'label_id': cast(obj.label, int),\n })\n return converted\n\n def _convert_mask_object(self, obj):\n converted = self._convert_annotation(obj)\n\n if isinstance(obj, RleMask):\n rle = obj.rle\n else:\n rle = mask_utils.encode(\n np.require(obj.image, dtype=np.uint8, requirements='F'))\n\n converted.update({\n 'label_id': cast(obj.label, int),\n 'rle': {\n # serialize as compressed COCO mask\n 'counts': rle['counts'].decode('ascii'),\n 'size': list(int(c) for c in rle['size']),\n }\n })\n return converted\n\n def _convert_polyline_object(self, obj):\n converted = 
self._convert_annotation(obj)\n\n converted.update({\n 'label_id': cast(obj.label, int),\n 'points': [float(p) for p in obj.points],\n })\n return converted\n\n def _convert_polygon_object(self, obj):\n converted = self._convert_annotation(obj)\n\n converted.update({\n 'label_id': cast(obj.label, int),\n 'points': [float(p) for p in obj.points],\n })\n return converted\n\n def _convert_bbox_object(self, obj):\n converted = self._convert_annotation(obj)\n\n converted.update({\n 'label_id': cast(obj.label, int),\n 'bbox': [float(p) for p in obj.get_bbox()],\n })\n return converted\n\n def _convert_points_object(self, obj):\n converted = self._convert_annotation(obj)\n\n converted.update({\n 'label_id': cast(obj.label, int),\n 'points': [float(p) for p in obj.points],\n 'visibility': [int(v.value) for v in obj.visibility],\n })\n return converted\n\n def _convert_caption_object(self, obj):\n converted = self._convert_annotation(obj)\n\n converted.update({\n 'caption': cast(obj.caption, str),\n })\n return converted\n\n def _convert_label_categories(self, obj):\n converted = {\n 'labels': [],\n }\n for label in obj.items:\n converted['labels'].append({\n 'name': cast(label.name, str),\n 'parent': cast(label.parent, str),\n })\n return converted\n\n def _convert_mask_categories(self, obj):\n converted = {\n 'colormap': [],\n }\n for label_id, color in obj.colormap.items():\n converted['colormap'].append({\n 'label_id': int(label_id),\n 'r': int(color[0]),\n 'g': int(color[1]),\n 'b': int(color[2]),\n })\n return converted\n\n def _convert_points_categories(self, obj):\n converted = {\n 'items': [],\n }\n for label_id, item in obj.items.items():\n converted['items'].append({\n 'label_id': int(label_id),\n 'labels': [cast(label, str) for label in item.labels],\n 'adjacent': [int(v) for v in item.adjacent],\n })\n return converted\n\nclass _Converter:\n def __init__(self, extractor, save_dir, save_images=False):\n self._extractor = extractor\n self._save_dir = save_dir\n self._save_images = save_images\n\n def convert(self):\n os.makedirs(self._save_dir, exist_ok=True)\n\n images_dir = osp.join(self._save_dir, DatumaroPath.IMAGES_DIR)\n os.makedirs(images_dir, exist_ok=True)\n self._images_dir = images_dir\n\n annotations_dir = osp.join(self._save_dir, DatumaroPath.ANNOTATIONS_DIR)\n os.makedirs(annotations_dir, exist_ok=True)\n self._annotations_dir = annotations_dir\n\n subsets = self._extractor.subsets()\n if len(subsets) == 0:\n subsets = [ None ]\n subsets = [n if n else DEFAULT_SUBSET_NAME for n in subsets]\n subsets = { name: _SubsetWriter(name, self) for name in subsets }\n\n for subset, writer in subsets.items():\n writer.write_categories(self._extractor.categories())\n\n for item in self._extractor:\n subset = item.subset\n if not subset:\n subset = DEFAULT_SUBSET_NAME\n writer = subsets[subset]\n\n writer.write_item(item)\n\n for subset, writer in subsets.items():\n writer.write(annotations_dir)\n\n def _save_image(self, item):\n image = item.image.data\n if image is None:\n return ''\n\n filename = item.image.filename\n if filename:\n filename = osp.splitext(filename)[0]\n else:\n filename = item.id\n filename += DatumaroPath.IMAGE_EXT\n image_path = osp.join(self._images_dir, filename)\n save_image(image_path, image)\n return filename\n\nclass DatumaroConverter(Converter, CliPlugin):\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument('--save-images', action='store_true',\n help=\"Save images (default: 
%(default)s)\")\n return parser\n\n def __init__(self, save_images=False):\n super().__init__()\n\n self._options = {\n 'save_images': save_images,\n }\n\n def __call__(self, extractor, save_dir):\n converter = _Converter(extractor, save_dir, **self._options)\n converter.convert()\n"
] |
[
[
"numpy.require"
]
] |
apeyrard/sjtu-work
|
[
"ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a"
] |
[
"DIP/exercises/ex10/pca.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nimport sys\nimport os\nfrom PIL import Image\nimport numpy as np\n\nsize = None\nmatrix_x = None\nfor image in os.listdir('./washington'):\n try:\n print(image)\n with Image.open(os.path.join('./washington',image)) as im:\n imgVector = np.array(list(im.getdata()))\n imgVector = imgVector.reshape(1, imgVector.shape[0])\n try:\n matrix_x = np.vstack((matrix_x, imgVector))\n except:\n matrix_x = imgVector\n except FileNotFoundError as e:\n sys.exit(\"Error : file not found\")\n\n#matrix_x = np.array([[0,1,1,1],\n #[0,0,1,0],\n #[0,0,0,1]\n #])\n\n#mean vector\nK = matrix_x.shape[1]\nprint('K', K)\nnb = matrix_x.shape[0]\nprint('nb', nb)\nmx = np.zeros((nb, 1))\nfor x in range(K):\n for y in range(nb):\n mx[y] += matrix_x[y, x]\nmx = mx/K\n\n#covar matrix\ncx = np.zeros((nb,nb))\nfor x in range(K):\n tmp = (matrix_x[:,x])\n tmp = tmp.reshape(tmp.shape[0],1)\n cx += np.dot(tmp,tmp.T) - np.dot(mx,mx.T)\ncx = cx/K\n\neigenvalues, eigenvectors = np.linalg.eig(cx)\n#tri\neival = np.zeros(eigenvalues.shape)\neivec = np.zeros(eigenvectors.shape)\nj = 0\nfor _ in range(nb):\n maxval = eigenvalues.max()\n for i in range(eigenvalues.shape[0]):\n val = eigenvalues[i]\n if val == maxval:\n eival[j] = val\n eigenvalues[i] = 0\n eivec[j] = eigenvectors[i]\n j += 1\n break\n\n#pruning eivec\npruning = 2\neivec = eivec[:pruning,:]\nprint(eivec)\n\nmatrix_y = np.zeros((pruning, matrix_x.shape[1]))\nfor i in range(K):\n tmp = (matrix_x[:,i]).reshape(nb, 1)\n truc = np.dot(eivec,(tmp-mx))\n matrix_y[:, i] = truc.reshape(truc.shape[0])\n\n\n#reconstruction\nmatrix_x2 = np.zeros(matrix_x.shape)\nfor i in range(K):\n tmp = (matrix_y[:,i])\n tmp = tmp.reshape(tmp.shape[0], 1)\n matrix_x2[:, i] = np.array((np.dot(eivec.T,tmp)+mx).reshape(nb))\n\ndef rescale(matrix):\n matrix = matrix - matrix.min()\n matrix = matrix * 255 / matrix.max()\n return matrix\n\ndata = np.vsplit(matrix_x2, 6)\nfor i,item in enumerate(data):\n item = list(rescale(item.reshape(item.shape[1])))\n newIm = Image.new(im.mode, im.size)\n newIm.putdata(item)\n newIm.show()\n\n diff = item - matrix_x[i]\n epsilon = 0.1\n print(diff)\n for j,val in enumerate(diff):\n if abs(val) < epsilon:\n diff[j] = 0\n print(diff)\n diff = rescale(diff)\n newIm = Image.new(im.mode, im.size)\n newIm.putdata(list(diff))\n newIm.show()\n\n\n\n\n\n"
] |
[
[
"numpy.dot",
"numpy.linalg.eig",
"numpy.vsplit",
"numpy.zeros",
"numpy.vstack"
]
] |
robfalck/CADRE
|
[
"f1fb419aade62fe830d56d958f35f1e153f04363"
] |
[
"CADRE/sun.py"
] |
[
"\"\"\"\nSun discipline for CADRE\n\"\"\"\n\nfrom six.moves import range\nimport numpy as np\nimport scipy.sparse\n\nfrom openmdao.core.explicitcomponent import ExplicitComponent\n\nfrom CADRE.kinematics import computepositionrotd, computepositionrotdjacobian\nfrom CADRE.kinematics import computepositionspherical, computepositionsphericaljacobian\n\n\nclass Sun_LOS(ExplicitComponent):\n \"\"\"\n Compute the Satellite to sun line of sight.\n \"\"\"\n\n def __init__(self, n=2):\n super(Sun_LOS, self).__init__()\n\n self.n = n\n\n # Earth's radius is 6378 km. 0.85 is the alpha in John Hwang's paper\n self.r1 = 6378.137 * 0.85\n self.r2 = 6378.137\n\n def setup(self):\n n = self.n\n\n self.add_input('r_e2b_I', np.zeros((6, n), order='F'), units=None,\n desc='Position and velocity vectors from '\n 'Earth to satellite in Earth-centered '\n 'inertial frame over time.')\n\n self.add_input('r_e2s_I', np.zeros((3, n), order='F'), units='km',\n desc='Position vector from Earth to sun in Earth-centered '\n 'inertial frame over time.')\n\n self.add_output('LOS', np.zeros((n, ), order='F'), units=None,\n desc='Satellite to sun line of sight over time')\n\n def compute(self, inputs, outputs):\n \"\"\"\n Calculate outputs.\n \"\"\"\n r_e2b_I = inputs['r_e2b_I']\n r_e2s_I = inputs['r_e2s_I']\n LOS = outputs['LOS']\n\n for i in range(self.n):\n r_b = r_e2b_I[:3, i]\n r_s = r_e2s_I[:3, i]\n dot = np.dot(r_b, r_s)\n cross = np.cross(r_b, r_s)\n dist = np.sqrt(cross.dot(cross))\n\n if dot >= 0.0:\n LOS[i] = 1.0\n elif dist <= self.r1:\n LOS[i] = 0.0\n elif dist >= self.r2:\n LOS[i] = 1.0\n else:\n x = (dist - self.r1) / (self.r2 - self.r1)\n LOS[i] = 3*x**2 - 2*x**3\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Calculate and save derivatives. (i.e., Jacobian)\n \"\"\"\n r_e2b_I = inputs['r_e2b_I']\n r_e2s_I = inputs['r_e2s_I']\n\n nj = 3*self.n\n\n Jab = np.zeros(shape=(nj, ), dtype=np.float)\n Jib = np.zeros(shape=(nj, ), dtype=np.int)\n Jjb = np.zeros(shape=(nj, ), dtype=np.int)\n Jas = np.zeros(shape=(nj, ), dtype=np.float)\n Jis = np.zeros(shape=(nj, ), dtype=np.int)\n Jjs = np.zeros(shape=(nj, ), dtype=np.int)\n\n r_b = np.zeros(shape=(3, ), dtype=np.int)\n r_s = np.zeros(shape=(3, ), dtype=np.int)\n Bx = np.zeros(shape=(3, 3, ), dtype=np.int)\n Sx = np.zeros(shape=(3, 3, ), dtype=np.int)\n cross = np.zeros(shape=(3, ), dtype=np.int)\n # ddist_cross = np.zeros(shape=(3, ), dtype=np.int)\n dcross_drb = np.zeros(shape=(3, 3, ), dtype=np.int)\n dcross_drs = np.zeros(shape=(3, 3, ), dtype=np.int)\n dLOS_dx = np.zeros(shape=(3, ), dtype=np.int)\n dLOS_drs = np.zeros(shape=(3, ), dtype=np.int)\n dLOS_drb = np.zeros(shape=(3, ), dtype=np.int)\n\n for i in range(self.n):\n r_b = r_e2b_I[:3, i]\n r_s = r_e2s_I[:3, i]\n Bx = crossMatrix(r_b)\n Sx = crossMatrix(-r_s)\n dot = np.dot(r_b, r_s)\n cross = np.cross(r_b, r_s)\n dist = np.sqrt(np.dot(cross, cross))\n\n if dot >= 0.0:\n dLOS_drb[:] = 0.0\n dLOS_drs[:] = 0.0\n elif dist <= self.r1:\n dLOS_drb[:] = 0.0\n dLOS_drs[:] = 0.0\n elif dist >= self.r2:\n dLOS_drb[:] = 0.0\n dLOS_drs[:] = 0.0\n else:\n x = (dist-self.r1)/(self.r2-self.r1)\n # LOS = 3*x**2 - 2*x**3\n ddist_dcross = cross/dist\n dcross_drb = Sx\n dcross_drs = Bx\n dx_ddist = 1.0/(self.r2-self.r1)\n dLOS_dx = 6*x - 6*x**2\n dLOS_drb = dLOS_dx*dx_ddist*np.dot(ddist_dcross, dcross_drb)\n dLOS_drs = dLOS_dx*dx_ddist*np.dot(ddist_dcross, dcross_drs)\n\n for k in range(3):\n iJ = i*3 + k\n Jab[iJ] = dLOS_drb[k]\n Jib[iJ] = i\n Jjb[iJ] = (i)*6 + k\n Jas[iJ] = dLOS_drs[k]\n Jis[iJ] = i\n Jjs[iJ] 
= (i)*3 + k\n\n self.Jb = scipy.sparse.csc_matrix((Jab, (Jib, Jjb)),\n shape=(self.n, 6*self.n))\n self.Js = scipy.sparse.csc_matrix((Jas, (Jis, Jjs)),\n shape=(self.n, 3*self.n))\n self.JbT = self.Jb.transpose()\n self.JsT = self.Js.transpose()\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n \"\"\"\n Matrix-vector product with the Jacobian.\n \"\"\"\n dLOS = d_outputs['LOS']\n\n if mode == 'fwd':\n if 'r_e2b_I' in d_inputs:\n r_e2b_I = d_inputs['r_e2b_I'][:].reshape((6*self.n), order='F')\n dLOS += self.Jb.dot(r_e2b_I)\n\n if 'r_e2s_I' in d_inputs:\n r_e2s_I = d_inputs['r_e2s_I'][:].reshape((3*self.n), order='F')\n dLOS += self.Js.dot(r_e2s_I)\n\n else:\n if 'r_e2b_I' in d_inputs:\n d_inputs['r_e2b_I'] += self.JbT.dot(dLOS).reshape((6, self.n), order='F')\n if 'r_e2s_I' in d_inputs:\n d_inputs['r_e2s_I'] += self.JsT.dot(dLOS).reshape((3, self.n), order='F')\n\n\ndef crossMatrix(v):\n # so m[1,0] is v[2], for example\n m = np.array([[0.0, -v[2], v[1]],\n [v[2], 0.0, -v[0]],\n [-v[1], v[0], 0.0]])\n return m\n\n\nclass Sun_PositionBody(ExplicitComponent):\n \"\"\"\n Position vector from earth to sun in body-fixed frame.\n \"\"\"\n\n def __init__(self, n=2):\n super(Sun_PositionBody, self).__init__()\n\n self.n = n\n\n def setup(self):\n n = self.n\n\n # Inputs\n self.add_input('O_BI', np.zeros((3, 3, n), order='F'), units=None,\n desc='Rotation matrix from the Earth-centered inertial frame '\n 'to the satellite frame.')\n\n self.add_input('r_e2s_I', np.zeros((3, n), order='F'), units='km',\n desc='Position vector from Earth to Sun in Earth-centered '\n 'inertial frame over time.')\n\n # Outputs\n self.add_output('r_e2s_B', np.zeros((3, n, ), order='F'), units='km',\n desc='Position vector from Earth to Sun in body-fixed '\n 'frame over time.')\n\n def compute(self, inputs, outputs):\n \"\"\"\n Calculate outputs.\n \"\"\"\n outputs['r_e2s_B'] = computepositionrotd(self.n, inputs['r_e2s_I'],\n inputs['O_BI'])\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Calculate and save derivatives. 
(i.e., Jacobian)\n \"\"\"\n self.J1, self.J2 = computepositionrotdjacobian(self.n, inputs['r_e2s_I'],\n inputs['O_BI'])\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n \"\"\"\n Matrix-vector product with the Jacobian.\n \"\"\"\n dr_e2s_B = d_outputs['r_e2s_B']\n\n if mode == 'fwd':\n if 'O_BI' in d_inputs:\n for k in range(3):\n for u in range(3):\n for v in range(3):\n dr_e2s_B[k, :] += self.J1[:, k, u, v] * d_inputs['O_BI'][u, v, :]\n if 'r_e2s_I' in d_inputs:\n for k in range(3):\n for j in range(3):\n dr_e2s_B[k, :] += self.J2[:, k, j] * d_inputs['r_e2s_I'][j, :]\n else:\n for k in range(3):\n if 'O_BI' in d_inputs:\n dO_BI = d_inputs['O_BI']\n for u in range(3):\n for v in range(3):\n dO_BI[u, v, :] += self.J1[:, k, u, v] * dr_e2s_B[k, :]\n if 'r_e2s_I' in d_inputs:\n dr_e2s_I = d_inputs['r_e2s_I']\n for j in range(3):\n dr_e2s_I[j, :] += self.J2[:, k, j] * dr_e2s_B[k, :]\n\n\nclass Sun_PositionECI(ExplicitComponent):\n \"\"\"\n Compute the position vector from Earth to Sun in Earth-centered inertial frame.\n \"\"\"\n\n # constants\n d2r = np.pi/180.\n\n def __init__(self, n=2):\n super(Sun_PositionECI, self).__init__()\n\n self.n = n\n\n def setup(self):\n n = self.n\n\n # Inputs\n self.add_input('LD', 0.0, units=None)\n\n self.add_input('t', np.zeros((n, ), order='F'), units='s', desc='Time')\n\n # Outputs\n self.add_output('r_e2s_I', np.zeros((3, n, ), order='F'), units='km',\n desc='Position vector from Earth to Sun in Earth-centered '\n 'inertial frame over time.')\n\n self.Ja = np.zeros(3*n)\n self.Ji = np.zeros(3*n)\n self.Jj = np.zeros(3*n)\n\n def compute(self, inputs, outputs):\n \"\"\"\n Calculate outputs.\n \"\"\"\n r_e2s_I = outputs['r_e2s_I']\n\n T = inputs['LD'] + inputs['t'][:]/3600./24.\n for i in range(0, self.n):\n L = self.d2r*280.460 + self.d2r*0.9856474*T[i]\n g = self.d2r*357.528 + self.d2r*0.9856003*T[i]\n Lambda = L + self.d2r*1.914666*np.sin(g) + self.d2r*0.01999464*np.sin(2*g)\n eps = self.d2r*23.439 - self.d2r*3.56e-7*T[i]\n r_e2s_I[0, i] = np.cos(Lambda)\n r_e2s_I[1, i] = np.sin(Lambda)*np.cos(eps)\n r_e2s_I[2, i] = np.sin(Lambda)*np.sin(eps)\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Calculate and save derivatives. 
(i.e., Jacobian)\n \"\"\"\n T = inputs['LD'] + inputs['t'][:]/3600./24.\n dr_dt = np.empty(3)\n for i in range(0, self.n):\n L = self.d2r*280.460 + self.d2r*0.9856474*T[i]\n g = self.d2r*357.528 + self.d2r*0.9856003*T[i]\n Lambda = L + self.d2r*1.914666*np.sin(g) + self.d2r*0.01999464*np.sin(2*g)\n eps = self.d2r*23.439 - self.d2r*3.56e-7*T[i]\n\n dL_dt = self.d2r*0.9856474\n dg_dt = self.d2r*0.9856003\n dlambda_dt = (dL_dt + self.d2r*1.914666*np.cos(g)*dg_dt +\n self.d2r*0.01999464*np.cos(2*g)*2*dg_dt)\n deps_dt = -self.d2r*3.56e-7\n\n dr_dt[0] = -np.sin(Lambda)*dlambda_dt\n dr_dt[1] = np.cos(Lambda)*np.cos(eps)*dlambda_dt - np.sin(Lambda)*np.sin(eps)*deps_dt\n dr_dt[2] = np.cos(Lambda)*np.sin(eps)*dlambda_dt + np.sin(Lambda)*np.cos(eps)*deps_dt\n\n for k in range(0, 3):\n iJ = i*3 + k\n self.Ja[iJ] = dr_dt[k]\n self.Ji[iJ] = iJ\n self.Jj[iJ] = i\n\n self.J = scipy.sparse.csc_matrix((self.Ja, (self.Ji, self.Jj)),\n shape=(3*self.n, self.n))\n self.JT = self.J.transpose()\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n \"\"\"\n Matrix-vector product with the Jacobian.\n \"\"\"\n dr_e2s_I = d_outputs['r_e2s_I']\n\n if mode == 'fwd':\n if 'LD' in d_inputs and 't' in d_inputs:\n # TODO - Should split this up so we can hook one up but not the other.\n dr_e2s_I[:] += (self.J.dot(d_inputs['LD'] +\n d_inputs['t']/3600./24.).reshape((3, self.n), order='F'))\n else:\n r_e2s_I = dr_e2s_I[:].reshape((3*self.n), order='F')\n if 'LD' in d_inputs:\n d_inputs['LD'] += sum(self.JT.dot(r_e2s_I))\n if 't' in d_inputs:\n d_inputs['t'] += self.JT.dot(r_e2s_I)/3600.0/24.0\n\n\nclass Sun_PositionSpherical(ExplicitComponent):\n \"\"\"\n Compute the elevation angle of the Sun in the body-fixed frame.\n \"\"\"\n\n def __init__(self, n=2):\n super(Sun_PositionSpherical, self).__init__()\n\n self.n = n\n\n def setup(self):\n n = self.n\n\n # Inputs\n self.add_input('r_e2s_B', np.zeros((3, n)), units='km',\n desc='Position vector from Earth to Sun in body-fixed '\n 'frame over time.')\n\n # Outputs\n self.add_output('azimuth', np.zeros((n,)), units='rad',\n desc='Ezimuth angle of the Sun in the body-fixed frame '\n 'over time.')\n\n self.add_output('elevation', np.zeros((n, )), units='rad',\n desc='Elevation angle of the Sun in the body-fixed frame '\n 'over time.')\n\n def compute(self, inputs, outputs):\n \"\"\"\n Calculate outputs.\n \"\"\"\n azimuth, elevation = computepositionspherical(self.n, inputs['r_e2s_B'])\n\n outputs['azimuth'] = azimuth\n outputs['elevation'] = elevation\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Calculate and save derivatives. 
(i.e., Jacobian)\n \"\"\"\n self.Ja1, self.Ji1, self.Jj1, self.Ja2, self.Ji2, self.Jj2 = \\\n computepositionsphericaljacobian(self.n, 3*self.n, inputs['r_e2s_B'])\n self.J1 = scipy.sparse.csc_matrix((self.Ja1, (self.Ji1, self.Jj1)),\n shape=(self.n, 3*self.n))\n self.J2 = scipy.sparse.csc_matrix((self.Ja2, (self.Ji2, self.Jj2)),\n shape=(self.n, 3*self.n))\n self.J1T = self.J1.transpose()\n self.J2T = self.J2.transpose()\n\n def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):\n \"\"\"\n Matrix-vector product with the Jacobian.\n \"\"\"\n if mode == 'fwd':\n if 'r_e2s_B' in d_inputs:\n r_e2s_B = d_inputs['r_e2s_B'].reshape((3*self.n), order='F')\n if 'azimuth' in d_outputs:\n d_outputs['azimuth'] += self.J1.dot(r_e2s_B)\n if 'elevation' in d_outputs:\n d_outputs['elevation'] += self.J2.dot(r_e2s_B)\n else:\n if 'r_e2s_B' in d_inputs:\n if 'azimuth' in d_outputs:\n azimuth = d_outputs['azimuth'][:]\n d_inputs['r_e2s_B'] += self.J1T.dot(azimuth).reshape((3, self.n), order='F')\n if 'elevation' in d_outputs:\n elevation = d_outputs['elevation'][:]\n d_inputs['r_e2s_B'] += self.J2T.dot(elevation).reshape((3, self.n), order='F')\n"
] |
[
[
"numpy.dot",
"numpy.cos",
"numpy.sin",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] |
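The Sun_PositionSpherical component in the row above delegates the actual conversion to a `computepositionspherical` helper that is not included in this payload. A minimal sketch of what that conversion amounts to, assuming the usual atan2-based azimuth/elevation convention (the helper's exact sign conventions are not confirmed here):

```python
import numpy as np

def position_spherical(r_e2s_B):
    # r_e2s_B: (3, n) array of body-frame Earth-to-Sun vectors.
    # Assumed convention: azimuth measured in the x-y plane, elevation above it.
    x, y, z = r_e2s_B
    azimuth = np.arctan2(y, x)
    elevation = np.arctan2(z, np.sqrt(x ** 2 + y ** 2))
    return azimuth, elevation

# Two sample time steps of unit-length Sun vectors
r = np.array([[1.0, 0.0],
              [0.0, 1.0],
              [0.0, 1.0]])
az, el = position_spherical(r)
print(az, el)
```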
iggisv9t/dimreducers-crusher
|
[
"3632ed9f51d26e1732199c11750eefd54cfda45d"
] |
[
"dimreducers_crusher/reducers/N_MDS.py"
] |
[
"from .AbstractReducer import AbstractReducer\nfrom sklearn.manifold import MDS as skmds\nimport numpy as np\n\n\nclass N_MDS(AbstractReducer):\n def __init__(self, d: int = 2, random_state: int = 0, **kwargs):\n super().__init__(d, random_state)\n self._main = skmds(n_components=d, random_state=random_state, metric = False, **kwargs)\n\n def fit_transform(self, x: np.ndarray, **kwargs) -> np.ndarray:\n return self._main.fit_transform(x)\n\n def fit(self, x: np.ndarray, **kwargs):\n return self._main.fit(x)\n\n def transform(self, x: np.ndarray, **kwargs) -> np.ndarray:\n raise NotImplementedError\n\n def set_random_state(self, random_state: int = 0):\n self.random_state = random_state\n self._main.random_state = random_state\n\n @property\n def is_deterministic(self) -> bool:\n return False\n\n @property\n def is_stateful(self) -> bool:\n return True\n\n @staticmethod\n def get_parameter_ranges() -> dict:\n return {'metric': (bool, False)}\n\n"
] |
[
[
"sklearn.manifold.MDS"
]
] |
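For context, the N_MDS wrapper above is a thin shim around scikit-learn's MDS with `metric=False`. A minimal sketch of the equivalent direct call on synthetic data (the dimreducers_crusher package itself is not required):

```python
import numpy as np
from sklearn.manifold import MDS

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 10))          # synthetic high-dimensional points

# Same configuration the wrapper uses: 2 components, non-metric MDS,
# fixed random_state for reproducibility.
reducer = MDS(n_components=2, metric=False, random_state=0)
embedding = reducer.fit_transform(x)   # (50, 2) low-dimensional embedding
```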
mbonyani/uav_data_harvesting
|
[
"bcb31385fefa42b7c6df7c9d44ad37b3e3ed05c4"
] |
[
"src/Map/Shadowing.py"
] |
[
"import numpy as np\nimport os\nimport tqdm\nfrom src.Map.Map import load_map\n\n\ndef bresenham(x0, y0, x1, y1, obstacles, shadow_map):\n if obstacles[y0, x0]:\n return\n x_dist = abs(x0 - x1)\n y_dist = -abs(y0 - y1)\n x_step = 1 if x1 > x0 else -1\n y_step = 1 if y1 > y0 else -1\n\n error = x_dist + y_dist\n\n # shadowed = False\n shadow_map[y0, x0] = False\n\n while x0 != x1 or y0 != y1:\n if 2 * error - y_dist > x_dist - 2 * error:\n # horizontal step\n error += y_dist\n x0 += x_step\n else:\n # vertical step\n error += x_dist\n y0 += y_step\n\n if obstacles[y0, x0]:\n # shadowed = True\n return\n\n # if shadowed:\n shadow_map[y0, x0] = False\n\n\ndef calculate_shadowing(map_path, save_as):\n total_map = load_map(map_path)\n obstacles = total_map.obstacles\n size = total_map.obstacles.shape[0]\n total = size * size\n\n total_shadow_map = np.ones((size, size, size, size), dtype=bool)\n with tqdm.tqdm(total=total) as pbar:\n for i, j in np.ndindex(total_map.obstacles.shape):\n shadow_map = np.ones((size, size), dtype=bool)\n\n for x in range(size):\n bresenham(i, j, x, 0, obstacles, shadow_map)\n bresenham(i, j, x, size - 1, obstacles, shadow_map)\n bresenham(i, j, 0, x, obstacles, shadow_map)\n bresenham(i, j, size - 1, x, obstacles, shadow_map)\n\n total_shadow_map[j, i] = shadow_map\n pbar.update(1)\n\n np.save(save_as, total_shadow_map)\n return total_shadow_map\n\n\ndef load_or_create_shadowing(map_path):\n shadow_file_name = os.path.splitext(map_path)[0] + \"_shadowing.npy\"\n if os.path.exists(shadow_file_name):\n return np.load(shadow_file_name)\n else:\n return calculate_shadowing(map_path, shadow_file_name)\n"
] |
[
[
"numpy.ndindex",
"numpy.load",
"numpy.save",
"numpy.ones"
]
] |
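The shadowing module above precomputes, for every grid cell, which other cells it can see by walking Bresenham lines until an obstacle is hit. A condensed, self-contained sketch of that line-of-sight test; the `visible()` helper below is illustrative and not part of the repository (it reports reachability of the endpoint rather than filling a shadow map):

```python
import numpy as np

def visible(x0, y0, x1, y1, obstacles):
    # Step from (x0, y0) toward (x1, y1) with integer Bresenham updates
    # and stop at the first obstacle. Returns True if the target is reached.
    if obstacles[y0, x0]:
        return False
    x_dist, y_dist = abs(x0 - x1), -abs(y0 - y1)
    x_step = 1 if x1 > x0 else -1
    y_step = 1 if y1 > y0 else -1
    error = x_dist + y_dist
    while (x0, y0) != (x1, y1):
        if 2 * error - y_dist > x_dist - 2 * error:
            error += y_dist   # horizontal step
            x0 += x_step
        else:
            error += x_dist   # vertical step
            y0 += y_step
        if obstacles[y0, x0]:
            return False
    return True

obstacles = np.zeros((5, 5), dtype=bool)
obstacles[2, 2] = True                  # a single blocking cell
print(visible(0, 0, 4, 4, obstacles))   # False: the diagonal is blocked
print(visible(0, 0, 4, 0, obstacles))   # True: the top row is clear
```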
ilijagjorgjiev/SSD_FascadeParsing
|
[
"a31346a3828f3bda9687a9013a40389dab446cef"
] |
[
"ssd_project/utils/transformations.py"
] |
[
"import torch\nimport random\nimport torchvision.transforms.functional as FT\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport glob\nimport math\nimport numpy as np\nfrom ssd_project.utils.utils import *\nfrom ssd_project.utils.global_variables import *\n\ndevice = DEVICE\n\nclass TrainDataset(Dataset):\n \"\"\"\n Creation of the dataset for training/testing. Can be used both for training and testing/validation.\n \"\"\"\n\n def __init__(self, imgs_path, bboxes_path, labels_path, split, ratio):\n \"\"\"\n Args:\n :imgs_path - path to original images\n :bboxes_path - path to bounding boxes used as a ground truth for all images\n :labels_path - path to labes for each bounding box respectively for all images\n :split - since we are using slightly different augmentations for training and testing\n split must be either \"train\" or \"test\"\n :ratio - ratio for splliting the dataset\n \"\"\"\n self.split = split.upper()\n self.imgs_path = imgs_path\n self.bboxes_path = bboxes_path\n self.labels_path = labels_path\n self.ratio = ratio\n assert self.split in {\"TRAIN\", \"TEST\"}\n\n #Get all paths with respect to each image\n self.imgs = glob.glob(self.imgs_path + \"*\")\n self.labels = glob.glob(self.labels_path + \"labels*\")\n self.bboxes = glob.glob(self.bboxes_path + \"bboxes*\")\n\n #Sort\n self.imgs.sort()\n self.labels.sort()\n self.bboxes.sort()\n\n if(split == \"TRAIN\"):\n a = math.floor(len(self.imgs) * ratio)\n self.imgs = self.imgs[:a]\n self.labels = self.labels[:a]\n self.bboxes = self.bboxes[:a]\n else:\n a = math.ceil(len(self.imgs) * ratio)\n self.imgs = self.imgs[a:]\n self.labels = self.labels[a:]\n self.bboxes = self.bboxes[a:]\n\n\n def __getitem__(self, i):\n\n #Open an image and convert to RGB\n img = Image.open(self.imgs[i], mode = \"r\")\n img = img.convert(\"RGB\")\n\n #Load respective bounding boxes/objects and labels for each img\n objs = np.load(self.bboxes[i])\n bboxes = torch.FloatTensor(objs[:, :4])\n labels = torch.LongTensor(objs[:, 4:])\n\n #Apply augmentations based on whether we are training or testing\n img, bboxes, labels = apply_augmentations(img, bboxes, labels, self.split)\n\n return img, bboxes, labels\n\n def __len__(self):\n return len(self.imgs)\n\n def collate_fn(self, batch):\n \n \"\"\"\n For images that have different amount of objects in a batch we use a collate function.\n\n So we stack the imgs and we return the batch of imgs, with varying size tensors of bounding boxes and respective labels.\n \"\"\"\n \n imgs, imgs_bboxes, imgs_labels = [], [], []\n \n for b in batch:\n imgs.append(b[0])\n imgs_bboxes.append(b[1])\n imgs_labels.append(b[2])\n\n imgs = torch.stack(imgs, dim = 0)\n\n return imgs, imgs_bboxes, imgs_labels\n\ndef expand(img, bboxes, mean):\n \"\"\"\n Performs a zoom out operation with 50% possibility as in the paper.\n Helpful when detecting smaller objects(windows, doors). \n Ref: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection/blob/master/utils.py\n \"\"\"\n #Convert img to tensor, and redefine mean to fill out surrounding space of the data that our base was trained on. 
\n img = FT.to_tensor(img)\n filler = torch.FloatTensor(mean).unsqueeze(1).unsqueeze(1)\n if random.random() > 0.5:\n return img, bboxes\n \n \n height, width = img.size(1), img.size(2)\n #Ratio of expansion\n ratio = random.uniform(1, 4)\n #Expand\n new_img = torch.ones((3, int(ratio * height), int(ratio * width)), dtype=torch.float) * filler\n\n #place original image \n left = random.randint(0, int(ratio * width) - width)\n top = random.randint(0, int(ratio * height) - height)\n new_img[:, top:(top+height), left:(left+width)] = img\n\n #expand bounding boxes respectively\n new_bboxes = bboxes + torch.FloatTensor([left, top, left, top]).unsqueeze(0) \n\n return new_img, new_bboxes\n\ndef random_crop(image, boxes, labels):\n \"\"\"\n Performs a random crop operation with multiple possibilities as in the paper.\n Helpful when detecting bigger objects(windows, doors, buildings). \n Ref: https://github.com/amdegroot/ssd.pytorch/blob/master/utils/augmentations.py\n \"\"\"\n height, width = image.size(1), image.size(2)\n\n while True:\n #randomly choose a min overlap\n mode = random.choice([0., .1, .3, .5, .7, .9, None]) # 'None' refers to no cropping\n\n # Do not crop if NONE\n if mode is None:\n return image, boxes, labels\n\n # Do 50 trials\n max_trials = 50\n for _ in range(max_trials):\n \n # Crop dimensions must be in [0.3, 1] of original dimensions\n new_height = int(random.uniform(0.3, 1) * height)\n new_width = int(random.uniform(0.3, 1) * width)\n\n # Aspect ratio must be in [0.5, 2]\n aspect_ratio = new_height / new_width\n if not 0.5 < aspect_ratio < 2:\n continue\n\n # Get crop coordinates\n left = random.randint(0, width - new_width)\n right = left + new_width\n top = random.randint(0, height - new_height)\n bottom = top + new_height\n crop = torch.FloatTensor([left, top, right, bottom]) # (4)\n\n # Compute jaccard overlap between crop and bboxes\n overlap = jaccard_overlap(crop.unsqueeze(0),\n boxes)\n \n overlap = overlap.squeeze(0) \n\n # If all overlaps are smaller try again \n if overlap.max().item() < mode:\n continue\n\n #Crop the image\n new_image = image[:, top:bottom, left:right]\n \n #Get centers of bounding boxxes\n bb_centers = (boxes[:, :2] + boxes[:, 2:]) / 2.\n\n # Find bounding boxes whose centers are in the crop\n centers_in_crop = (bb_centers[:, 0] > left) * (bb_centers[:, 0] < right) * (bb_centers[:, 1] > top) * (bb_centers[:, 1] < bottom) \n\n #If no boxes are in the crop try again\n if not centers_in_crop.any():\n continue\n\n #Remove bounding boxes that do not satisfy cond\n new_boxes = boxes[centers_in_crop, :]\n new_labels = labels[centers_in_crop]\n\n # Compute the positions of bounding boxes in the new img\n new_boxes[:, :2] = torch.max(new_boxes[:, :2], crop[:2])\n new_boxes[:, :2] -= crop[:2]\n new_boxes[:, 2:] = torch.min(new_boxes[:, 2:], crop[2:]) \n new_boxes[:, 2:] -= crop[:2]\n\n return new_image, new_boxes, new_labels\n\ndef random_hflip(img, boxes):\n \"\"\"\n Horizontal flip of img and bounding boxes with a 50% possibility\n \"\"\"\n img = FT.to_pil_image(img)\n if random.random() > 0.5:\n return img, boxes\n #Flip Image\n img = FT.hflip(img)\n img_w = img.width\n\n # Flip bounding boxes\n new_boxes = boxes\n new_boxes[:, 0] = img_w - boxes[:, 0] - 1\n new_boxes[:, 2] = img_w - boxes[:, 2] - 1\n new_boxes = new_boxes[:, [2, 1, 0, 3]]\n\n return img, new_boxes\n\ndef photometric_distort(img):\n #REF: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection\n #Apply distortions on brightness, contrast, saturation, hue\n new_img = 
img\n\n distortions = [FT.adjust_brightness,\n FT.adjust_contrast,\n FT.adjust_saturation,\n FT.adjust_hue]\n\n random.shuffle(distortions)\n\n for d in distortions:\n if random.random() < 0.5:\n if d.__name__ is 'adjust_hue':\n #Empirical Values taken out of Original/Caffe Repo\n adjust_factor = random.uniform(-18 / 255., 18 / 255.)\n else:\n #Empirical Values taken out of Original/Caffe Repo\n adjust_factor = random.uniform(0.5, 1.5)\n\n # Apply each distortion\n new_img = d(new_img, adjust_factor)\n\n return new_img\n\n\ndef resize(image, boxes, dims=(300, 300)):\n \"\"\"\n Resize an image to 300, 300 if it is not and its bounding boxes.\n \"\"\"\n # Resize image\n new_image = FT.resize(image, dims)\n\n # Resize bounding boxes\n old_dims = torch.FloatTensor([image.width, image.height, image.width, image.height]).unsqueeze(0)\n #Percent Coordinates\n new_boxes = boxes / old_dims\n\n return new_image, new_boxes\n\ndef apply_augmentations(img, bboxes, labels, split=\"TRAIN\", augment = True):\n\n #ImageNET\n assert split in {'TRAIN', 'TEST'}\n\n # Mean and standard deviation of ImageNet data that the base/VGG from torchvision was trained on\n #https://pytorch.org/docs/stable/torchvision/models.html\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n\n\n #Apply Augmentations only on Training-Set\n if split == 'TRAIN' and augment == True:\n\n #Photometric distortions\n img = photometric_distort(img)\n \n #Zoom out operation, expand img\n img, bboxes = expand(img, bboxes, mean)\n\n #Randomly crop image(zoom in)\n img, bboxes, labels = random_crop(img, bboxes, labels)\n\n #Horizontal Flip image with a 50% chance\n img, bboxes = random_hflip(img, bboxes)\n\n # Resize image to (300, 300)\n img, bboxes = resize(img, bboxes, dims=(300, 300))\n\n # Convert PIL image to Torch tensor\n img = FT.to_tensor(img)\n\n # Normalize by mean and standard deviation of ImageNet data that our base VGG was trained on\n img = FT.normalize(img, mean=mean, std=std)\n\n return img, bboxes, labels\n"
] |
[
[
"torch.LongTensor",
"torch.max",
"torch.min",
"torch.FloatTensor",
"torch.stack",
"numpy.load"
]
] |
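As a usage sketch, the TEST path of `apply_augmentations` above reduces to a resize, rescaling the boxes to fractional coordinates, tensor conversion, and ImageNet normalization. A minimal standalone version with a synthetic image and box (values are placeholders):

```python
import torch
from PIL import Image
import torchvision.transforms.functional as FT

img = Image.new("RGB", (640, 480), color=(128, 128, 128))
boxes = torch.FloatTensor([[100.0, 120.0, 300.0, 360.0]])   # xmin, ymin, xmax, ymax

# Resize to the SSD input size and express boxes as fractions of the old size.
new_img = FT.resize(img, (300, 300))
old_dims = torch.FloatTensor([img.width, img.height, img.width, img.height]).unsqueeze(0)
new_boxes = boxes / old_dims

# Convert to a tensor and normalize with the ImageNet statistics the VGG base expects.
tensor = FT.to_tensor(new_img)
tensor = FT.normalize(tensor,
                      mean=[0.485, 0.456, 0.406],
                      std=[0.229, 0.224, 0.225])
```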
theandygross/TCGA
|
[
"bf36d5d69b92b09da54b4111c633f7e60cb7e210"
] |
[
"src/Figures/Survival.py"
] |
[
"\"\"\"\nCreated on Apr 7, 2013\n\n@author: agross\n\"\"\"\nfrom Processing.Helpers import get_vec_type, to_quants\nfrom Stats.Survival import get_cox_ph\n\nimport pandas as pd\nimport matplotlib.pylab as plt\nimport pandas.rpy.common as com\nimport rpy2.robjects as robjects \nfrom Stats.Survival import get_surv_fit\n\nimport numpy as np\n\nsurvival = robjects.packages.importr('survival')\nbase = robjects.packages.importr('base')\n\ncolors_global = plt.rcParams['axes.color_cycle'] * 10\n\n\ndef get_markers(censoring, survival):\n \"\"\"\n Get locations for markers in KM plot.\n censoring is a list of censoring times.\n survival is a time-series of survival values\n \"\"\"\n markers = []\n for cc in censoring:\n d = (pd.Series(survival.index, survival.index, dtype=float) - cc)\n t = d[d <= 0].idxmax()\n markers += [(cc, survival[t])]\n return markers\n\n\ndef draw_survival_curves_mpl(fit, ax=None, title=None, colors=None, ms=80, alpha=1):\n \"\"\"\n Takes an R survfit.\n \"\"\"\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(4, 3))\n s = base.summary(fit)\n tab = pd.DataFrame({v: s.rx2(v) for v in s.names \n if len(s.rx2(v)) == len(s.rx2('time'))},\n index=s.rx2('time'))\n call = com.convert_robj(fit.rx2('call')[2])\n \n groups = robjects.r.sort(robjects.r.c(*call.feature.unique()))\n \n if 'strata' not in tab:\n groups = [0]\n tab['strata'] = 1\n elif len(tab.strata.unique()) != len(groups):\n gg = list(call[call.event > 0].feature.unique())\n gg = [g for g in groups if g in gg]\n bg = [g for g in groups if g not in gg]\n groups = gg + bg\n \n for i, group in enumerate(groups):\n censoring = call[(call.event == 0) & (call.feature == group)].days\n surv = tab[tab.strata == (i + 1)].surv\n surv = surv.copy().set_value(0., 1.)\n surv = surv.sort_index()\n if surv.index[-1] < censoring.max():\n surv = surv.set_value(censoring.max(), surv.iget(-1)).sort_index()\n\n censoring_pos = get_markers(censoring, surv)\n ax.step(surv.index, surv, lw=3, where='post', alpha=alpha, label=group)\n if colors is not None:\n try:\n \"\"\"fix for R-Python str-to-int conversion\"\"\"\n color = colors[group]\n except:\n color = colors[i]\n ax.lines[-1].set_color(color)\n if len(censoring_pos) > 0:\n ax.scatter(*zip(*censoring_pos), marker='|', s=ms,\n color=ax.lines[-1].get_color())\n \n ax.set_ylim(0, 1.05)\n # ax.set_xlim(0, max(surv.index)*1.05)\n ax.set_xlim(0, max(call.days) * 1.05)\n ax.legend(loc='best')\n ax.set_ylabel('Survival')\n ax.set_xlabel('Years')\n if title:\n ax.set_title(title)\n\n\ndef process_feature(feature, q, std):\n if (get_vec_type(feature) == 'real') and (len(feature.unique()) > 10):\n feature = to_quants(feature, q=q, std=std, labels=True)\n return feature\n\n\ndef draw_survival_curve(feature, surv, q=.25, std=None, **args):\n feature = process_feature(feature, q, std)\n fmla = robjects.Formula('Surv(days, event) ~ feature') \n m = get_cox_ph(surv, feature)\n r_data = m.rx2('call')[2]\n # s = survival.survdiff(fmla, r_data)\n # p = str(s).split('\\n\\n')[-1].strip().split(', ')[-1]\n draw_survival_curves_mpl(survival.survfit(fmla, r_data), **args)\n\n\ndef draw_survival_curves(feature, surv, assignment=None, legend='out'):\n if assignment is None:\n draw_survival_curve(feature, surv)\n return\n num_plots = len(assignment.unique())\n fig, axs = plt.subplots(1, num_plots, figsize=(num_plots * 4, 3), sharey=True)\n for i, (l, s) in enumerate(feature.groupby(assignment)):\n draw_survival_curve(s, surv, ax=axs[i],\n title='{} = {}'.format(assignment.name, l))\n if legend is 'out':\n 
axs[i].get_legend().set_visible(False)\n \ndef survival_stat_plot(t, upper_lim=5, axs=None, colors=None):\n \"\"\"\n t is the DataFrame returned from a get_surv_fit call.\n \"\"\"\n if axs is None:\n fig = plt.figure(figsize=(6, 1.5))\n ax = plt.subplot2grid((1, 3), (0, 0), colspan=2)\n ax2 = plt.subplot2grid((1, 3), (0, 2))\n else:\n ax, ax2 = axs\n fig = plt.gcf()\n if colors is None:\n colors = colors_global\n for i, (idx, v) in enumerate(t.iterrows()):\n conf_int = v['Median Survival']\n median_surv = v[('Median Survival', 'Median')]\n if (v['Stats']['# Events'] / v['Stats']['# Patients']) < .5:\n median_surv = np.nanmin([median_surv, 20])\n conf_int['Upper'] = np.nanmin([conf_int['Upper'], 20])\n l = ax.plot(*zip(*[[conf_int['Lower'], i], [median_surv, i], [conf_int['Upper'], i]]), lw=3, ls='--',\n marker='o', dash_joinstyle='bevel', color=colors[i])\n ax.scatter(median_surv, i, marker='s', s=100, color=l[0].get_color(), edgecolors=['black'], zorder=10,\n label=idx)\n ax.set_yticks(range(len(t)))\n ax.set_yticklabels(['{} ({})'.format(idx, int(t.ix[idx]['Stats']['# Patients'])) \n for idx in t.index])\n ax.set_ylim(-.5, i + .5)\n ax.set_xlim(0, upper_lim)\n ax.set_xlabel('Median Survival (Years)')\n \n tt = t['5y Survival']\n (tt['Surv']).plot(kind='barh', ax=ax2,\n color=[l.get_color() for l in ax.lines],\n xerr=[tt.Surv - tt.Lower, tt.Upper - tt.Surv],\n width=.75,\n ecolor='black')\n ax2.set_xlabel('5Y Survival')\n ax2.set_xticks([0, .5, 1.])\n ax2.set_yticks([])\n fig.tight_layout()\n \ndef survival_and_stats(feature, surv, upper_lim=5, axs=None, figsize=(7, 5), title=None,\n order=None, colors=None, **args):\n if axs is None:\n fig = plt.figure(figsize=figsize)\n ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3, rowspan=2)\n ax2 = plt.subplot2grid((3, 3), (2, 0), colspan=2)\n ax3 = plt.subplot2grid((3, 3), (2, 2))\n else:\n ax1, ax2, ax3 = axs\n fig = plt.gcf()\n if feature.dtype != str:\n feature = feature.astype(str)\n if colors is None:\n colors = colors_global\n \n t = get_surv_fit(surv, feature)\n if order is None:\n t = t.sort([('5y Survival', 'Surv')], ascending=True)\n else:\n t = t.ix[order]\n survival_stat_plot(t, axs=[ax2, ax3], upper_lim=upper_lim, colors=colors)\n r = pd.Series({s:i for i, s in enumerate(t.index)})\n color_lookup = {c: colors[i % len(colors)] for i, c in enumerate(t.index)}\n \n draw_survival_curve(feature, surv, ax=ax1, colors=color_lookup, **args)\n ax1.legend().set_visible(False)\n if title:\n ax1.set_title(title)\n \n fig.tight_layout()\n\n"
] |
[
[
"matplotlib.pylab.gcf",
"pandas.Series",
"matplotlib.pylab.subplot2grid",
"numpy.nanmin",
"matplotlib.pylab.figure",
"matplotlib.pylab.subplots"
]
] |
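Most of the survival plotting above needs an R runtime via rpy2, but the censoring-marker lookup in `get_markers` is plain pandas: for each censoring time it finds the most recent point on the step curve at or before that time. A small self-contained illustration with a synthetic curve:

```python
import pandas as pd

# Synthetic step-function survival curve (index = years, value = S(t)).
surv = pd.Series([1.0, 0.9, 0.75, 0.6], index=[0.0, 1.0, 2.5, 4.0])
censoring = [0.5, 3.0]

markers = []
for cc in censoring:
    d = pd.Series(surv.index, index=surv.index, dtype=float) - cc
    t = d[d <= 0].idxmax()          # last curve time point not after cc
    markers.append((cc, surv[t]))

print(markers)                      # [(0.5, 1.0), (3.0, 0.75)]
```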
zvowell/commandLineBasics_2020
|
[
"240cb78ac92996d0708d820020230d8e5aa8bce3"
] |
[
"workshop-resources/python-script/ClassRosterFormat.py"
] |
[
"#Script to reformat a class roster into output format\n#9/26/2019\n\nimport pandas as pd\nimport sys\n\n# declare the function name as 'parser', and establish an argument for parser to take, which is named 'filePath'\ndef parser(filePath):\n # define a variable called 'df' as the value of the contents defined by filePath, and use pandas to read the contents as a csv\n df = pd.read_csv(filePath)\n\n # define a variable called 'dfdrop' as the value of 'df', but without (.drop) rows 1-7 of the 'df' csv\n dfdrop = df.drop([0, 1, 2, 3, 4, 5, 6])\n\n # define a variable called 'dffiltered' as an index (.iloc) of 'dfdrop', which takes from 'dfdrop' csv the a slice of rows 2 - the end of the columns (1:) and a slice of columns 2-4 (1:3) \n dffiltered = dfdrop.iloc[1:, 1:3]\n # define a variable called 'names' as the value of the column 'Unnamed: 2' from the index stored in the variable 'dffiltered'. Apparently pandas assigns 'Unnamed' columns that don't have headings. In this case, column 'Unnamed: 2' is the \"Student Username\" column in the original csv, because it is the 3rd column and therefore at position 2 in the 'dffiltered' index.\n names = dffiltered['Unnamed: 2']\n \n # define a variable called 'splitname1' as the value of the column 'Class List' (not sure where this name comes from) from index 'dffiltered', and use the presence of a comma (\", \") to split the string values (str.split is its own function, not 2 separate functions) into 2 columns (exand=True) and limit the amount of splits to 1 (n=1)\n splitname1 = dffiltered['Class List'].str.split(\", \", n=1,expand=True)\n # label the 2 resulting columns as 'a' (column 1) and 'b' (column 2)\n splitname1.columns = ['a', 'b']\n \n # define a variable 'h' that splits column 'b' from the variable 'splitname1' by the presence of a whitespace character (\" \"), and split them into 2 columns (expand=True)\n h = splitname1['b'].str.split(\" \", expand=True)\n # label the 2 resulting columns as 'c' and 'd'\n h.columns = ['c','d']\n # drop the column 'd' from the index stored in the variable 'h'\n h = h.drop(['d'], axis=1)\n # return the first 5 rows of the index stored in the variable 'h'\n h.head()\n\n # drop the column 'b' (which was the string value after the comma in the original 'Class List' column values) from the index stored in the variable 'splitname1'\n splitname1 = splitname1.drop(['b'], axis=1)\n # define a variable called 'final' and use pandas to concatenate the indexes stored in 'splitname1' and 'h' (which should be last name (before the comma) in 'Class List', and the first name, stripped of any middle initial, in 'Class List')\n final = pd.concat([splitname1, h], axis=1)\n # use pandas to concatenate to what's aready in 'final' the index that is found in 'names'\n final = pd.concat([final, names], axis=1)\n # label the 3 columns in the index stored in 'final' as 'First', 'Last', and 'User'\n final.columns = ['First', 'Last', 'User']\n # return 'final'?\n final\n\n # define a variable called 'email' that takes the column 'User' in 'final' and appends the string \"@calpoly.edu\" to each value in that column\n email = final['User'] + \"@calpoly.edu\"\n # use pandas to concatenate the index stored in 'email' to the existing index stored in 'final'\n final = pd.concat([final, email], axis=1)\n # label the columns in the new version of 'final' as 'First', 'Last', 'User', and 'Email'\n final.columns = ['First', 'Last', 'User', 'Email']\n\n # define a variable called 'username' and take the column 'User' in 'final' and append the 
string \"_CalPoly\" to each value in that column\n username = final['User'] + \"_CalPoly\"\n # use pandas to concatenate the index 'username' to the the already existing index stored in 'final'\n final = pd.concat([final, username], axis=1)\n\n # label the columns in the new version of 'final' as 'Last Name', 'First Name', 'User', 'Email', and 'Username'\n final.columns = ['Last Name', 'First Name', 'User', 'Email', 'Username']\n # take the column 'Role' (not sure where this comes from) and assign the value \"Publisher\" to each row in that column\n final['Role'] = \"Publisher\"\n # take the column 'User Type' (not sure where this comes from) and assign the value \"Creator\" to each row in that column\n final['User Type'] = \"Creator\"\n # drop the column 'User' from the index 'final'\n final = final.drop('User', axis=1)\n # bring the index from the original dataframe ('df'?) and add it as the first column\n final = final.reset_index()\n # drop the column 'index' (not sure where this comes from, or why this is necessary?) from the index 'final'\n final = final.drop(['index'], axis=1)\n\n # convert the index 'final' to the csv format, and output this new csv to a file that will be named according the value defined in the argument 'filePath' argument of function 'parser', and append the string \"_output.csv\" to the file name (not sure where the [:-4] comes into play)\n final.to_csv(filePath[:-4] + \"_output.csv\") \n\n# define a function called 'main' that takes no arguments\ndef main():\n # run the function 'parser' and pass to it through the 'filePath' argument the value of the first argument in the bash command that executes the script ('(sys.argv[1])'). This value will also be used in the occurrences of 'filePath' in the parser function defined above\n parser(sys.argv[1])\n\n# return the output of function 'main'\nmain()\n"
] |
[
[
"pandas.concat",
"pandas.read_csv"
]
] |
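The essence of `parser()` above is a pair of string splits plus column concatenation. A toy, hedged version on an in-memory DataFrame (column names mirror the script; the real roster CSV has extra header rows that `parser()` drops first):

```python
import pandas as pd

df = pd.DataFrame({
    "Class List": ["Doe, Jane A.", "Smith, John"],
    "User": ["jdoe", "jsmith"],
})

# Split "Last, First M." into last name and the remainder, then keep only
# the first token of the remainder (dropping any middle initial).
split1 = df["Class List"].str.split(", ", n=1, expand=True)
split1.columns = ["Last", "Rest"]
first = split1["Rest"].str.split(" ", expand=True)[0]

out = pd.DataFrame({
    "Last Name": split1["Last"],
    "First Name": first,
    "Email": df["User"] + "@calpoly.edu",
    "Username": df["User"] + "_CalPoly",
})
out["Role"] = "Publisher"
out["User Type"] = "Creator"
print(out)
```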
puraminy/OpenPrompt
|
[
"49f0ed9719bb6285e94c746de4511991c848492c"
] |
[
"openprompt/prompts/prompt_generator.py"
] |
[
"from abc import abstractmethod\nfrom builtins import ValueError\nfrom typing import List, Optional, Dict\nimport torch\nimport torch.nn.functional as F\nfrom ..utils import logger\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, BertForMaskedLM, RobertaForMaskedLM\nfrom transformers.tokenization_utils import PreTrainedTokenizer\nfrom transformers.utils.dummy_pt_objects import PreTrainedModel\nfrom tqdm import tqdm\nfrom typing import List, Optional, Dict\nimport itertools\nimport numpy as np\nfrom ..utils import signature\nfrom ..config import convert_cfg_to_dict\nfrom torch.nn.parallel import DataParallel\n\n\nclass TemplateGenerator:\n r\"\"\" Automatic Template Search from LM-BFF\n\n Args:\n beam_width: beam search width\n max_length: maximum length of generated template\n length_limit: length limit for each part of content\n target_number: number of parts to generate, e.g. in T5, every <extra_id_{}> token is one part\n \"\"\"\n def __init__(self, \n template_generate_model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n max_length: Optional[int] = 20,\n target_number: Optional[int] = 2,\n beam_width: Optional[int] = 100,\n length_limit: Optional[List[int]] = None, \n forbidden_word_ids: Optional[List[int]] = []):\n self.template_generate_model = template_generate_model\n self.tokenizer = tokenizer\n self.target_number = target_number # number of parts to generate in one sample\n self.beam_width = beam_width\n self.max_length = max_length\n self.length_limit = length_limit\n self.probs_buffer, self.labels_buffer = None, None\n\n # Forbid single space token, \"....\", and \"..........\", and some other tokens based on vocab\n self.forbidden_word_ids = forbidden_word_ids\n self.sent_end_id = self.tokenizer.convert_tokens_to_ids('.')\n\n self.input_ids_buffer, self.attention_mask_buffer, self.labels_buffer = None, None, None\n\n def register_buffer(self, input_ids, attention_mask, labels):\n if self.input_ids_buffer is None :\n self.input_ids_buffer = input_ids.detach()\n self.attention_mask_buffer = attention_mask.detach()\n self.labels_buffer = labels.detach()\n else:\n self.input_ids_buffer = torch.vstack([self.input_ids_buffer, input_ids.detach()])\n self.attention_mask_buffer = torch.vstack([self.attention_mask_buffer, attention_mask.detach()])\n self.labels_buffer = torch.hstack([self.labels_buffer, labels.detach()])\n\n @abstractmethod\n def get_next_part_token_id(self, part_id: int) -> int:\n r\"\"\"get the start token id for next part\n \"\"\"\n raise NotImplementedError\n \n def convert_template(self, text_list: List[str]) -> List[str]:\n r\"\"\"convert the generated template into a standard template for downstream prompt model, return a list of str\n \"\"\"\n raise NotImplementedError\n \n @abstractmethod\n def get_templates(self):\n inner_model = self.template_generate_model.module if isinstance(self.template_generate_model, DataParallel) else self.template_generate_model\n input_ids = self.input_ids_buffer\n attention_mask = self.attention_mask_buffer\n\n ori_decoder_input_ids = torch.zeros((input_ids.size(0), self.max_length)).long()\n ori_decoder_input_ids[..., 0] = inner_model.config.decoder_start_token_id\n\n\n # decoder_input_ids: decoder inputs for next regressive generation\n # ll: log likelihood\n # output_id: which part of generated contents we are at\n # output: generated content so far\n # last_length (deprecated): how long we have generated for this part\n current_output = [{'decoder_input_ids': ori_decoder_input_ids, 'll': 0, 'output_id': 
1, 'output': [], 'last_length': -1}]\n for i in tqdm(range(self.max_length - 2)):\n new_current_output = []\n for item in current_output:\n if item['output_id'] > self.target_number:\n # Enough contents\n new_current_output.append(item)\n continue\n decoder_input_ids = item['decoder_input_ids']\n\n # Forward\n batch_size = 32\n turn = input_ids.size(0) // batch_size\n if input_ids.size(0) % batch_size != 0:\n turn += 1\n aggr_output = []\n for t in range(turn):\n start = t * batch_size\n end = min((t + 1) * batch_size, input_ids.size(0))\n\n with torch.no_grad():\n aggr_output.append(self.template_generate_model(input_ids[start:end], attention_mask=attention_mask[start:end], decoder_input_ids=decoder_input_ids.to(input_ids.device)[start:end])[0])\n aggr_output = torch.cat(aggr_output, 0)\n\n # Gather results across all input sentences, and sort generated tokens by log likelihood\n aggr_output = aggr_output.mean(0)\n log_denominator = torch.logsumexp(aggr_output[i], -1).item()\n ids = list(range(inner_model.config.vocab_size))\n ids.sort(key=lambda x: aggr_output[i][x].item(), reverse=True)\n ids = ids[:self.beam_width+3]\n \n for word_id in ids:\n output_id = item['output_id']\n\n if word_id == self.get_part_token_id(output_id) or word_id == self.tokenizer.eos_token_id:\n # Finish one part\n if self.length_limit is not None and item['last_length'] < self.length_limit[output_id - 1]:\n check = False\n else:\n check = True\n output_id += 1\n last_length = 0\n else:\n last_length = item['last_length'] + 1\n check = True\n\n output_text = item['output'] + [word_id]\n ll = item['ll'] + aggr_output[i][word_id] - log_denominator\n new_decoder_input_ids = decoder_input_ids.new_zeros(decoder_input_ids.size())\n new_decoder_input_ids[:] = decoder_input_ids\n new_decoder_input_ids[..., i + 1] = word_id\n \n if word_id in self.forbidden_word_ids:\n check = False\n \n # Forbid continuous \".\"\n if len(output_text) > 1 and output_text[-2] == self.sent_end_id and output_text[-1] == self.sent_end_id:\n check = False\n\n if check:\n # Add new results to beam search pool\n new_item = {'decoder_input_ids': new_decoder_input_ids, 'll': ll, 'output_id': output_id, 'output': output_text, 'last_length': last_length}\n new_current_output.append(new_item)\n\n if len(new_current_output) == 0:\n break\n\n new_current_output.sort(key=lambda x: x['ll'], reverse=True)\n new_current_output = new_current_output[:self.beam_width]\n current_output = new_current_output\n\n self.templates_text = []\n for item in current_output:\n generate_text = []\n for i in item['output']:\n generate_text.append(self.tokenizer._convert_id_to_token(i))\n self.templates_text.append(' '.join(self.convert_template(generate_text)))\n \n def _show_template(self):\n logger.info(\"Templates are \\n{}\".format('\\n'.join(self.templates_text)))\n\n def generate(self):\n self.template_generate_model.eval()\n with torch.no_grad():\n self.get_templates()\n self._show_template()\n return self.templates_text\n\n @classmethod\n def from_config(cls, config, **kwargs,):\n init_args = signature(cls.__init__).args\n _init_dict = {**convert_cfg_to_dict(config), **kwargs}\n init_dict = {key: _init_dict[key] for key in _init_dict if key in init_args}\n template_generator = cls(**init_dict)\n return template_generator\n \n def release_memory(self):\n self.template_generate_model = self.template_generate_model.cpu()\n \n\nclass T5TemplateGenerator(TemplateGenerator): # TODO merge it into Base class\n r\"\"\" Automatic Template Search from LM-BFF using T5\n 
\"\"\"\n def __init__(self, \n template_generate_model: T5ForConditionalGeneration,\n tokenizer: T5Tokenizer,\n max_length: Optional[int] = 20,\n target_number: Optional[int] = 2,\n beam_width: Optional[int] = 100,\n length_limit: Optional[List[int]] = None,\n forbidden_word_ids: Optional[List[int]] = [3, 19794, 22354]):\n super().__init__(template_generate_model = template_generate_model,\n tokenizer = tokenizer,\n max_length = max_length,\n target_number= target_number,\n beam_width = beam_width,\n length_limit = length_limit,\n forbidden_word_ids = forbidden_word_ids)\n\n def get_part_token_id(self, part_id):\n return self.tokenizer.convert_tokens_to_ids('<extra_id_0>') - part_id\n\n def convert_template(self, generate_text_list):\n text_list = self.tokenizer.convert_tokens_to_string(generate_text_list).replace('<extra_id_0>', '{\"placeholder\":\"text_a\"}').replace('<extra_id_1>', ' {\"mask\"}').replace('<extra_id_2>', ' {\"placeholder\": \"text_b\"}').replace('</s>', '').replace(' ', ' ').split(' ')\n # incase no <extra_id_1> (generation stop by maximum length)\n if '{\"mask\"}' not in text_list:\n text_list.append('{\"mask\"}')\n if '{\"placeholder\": \"text_b\"}' not in text_list:\n text_list.append('{\"placeholder\": \"text_b\"}')\n return text_list\n\n\nclass VerbalizerGenerator:\n r\"\"\" Automatic Label Words Search from https://arxiv.org/pdf/2012.15723.pdf\n\n Args:\n candidate_num: the number of candidates for further selection\n label_word_num_per_class: candidate label words per class\n probs_buffer: stores the probability $q_{P,t}(1|\\mathbf{x})$ to be \n used in later label words selection.\n label_buffer: stores the label $y$ to be used in later label words\n selection.\n \"\"\"\n def __init__(self, \n model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n candidate_num: int,\n label_word_num_per_class: Optional[int] = 1):\n self.model = model\n self.tokenizer = tokenizer\n self.candidate_num = candidate_num\n self.label_word_num_per_class = label_word_num_per_class\n self.probs_buffer, self.labels_buffer = None, None\n\n def register_buffer(self, data):\n self.model.eval()\n with torch.no_grad():\n inner_model = self.model.module if isinstance(self.model, DataParallel) else self.model\n forward_keys = signature(inner_model.forward).args\n input_batch = {key: data[key] for key in data if key in forward_keys}\n logits = self.model.forward(**input_batch).logits[data['loss_ids']==1]\n logits = F.softmax(logits.detach(),dim=-1)\n if self.probs_buffer is None:\n self.probs_buffer = logits\n self.labels_buffer = data.label.detach()\n else:\n self.probs_buffer = torch.vstack([self.probs_buffer, logits])\n self.labels_buffer = torch.hstack([self.labels_buffer, data.label.detach()])\n \n def post_process(self, word):\n inner_model = self.model.module if isinstance(self.model, DataParallel) else self.model\n if isinstance(inner_model, RobertaForMaskedLM):\n return word.lstrip('Ġ')\n elif isinstance(inner_model, BertForMaskedLM):\n return word\n else:\n raise NotImplementedError(\"not implemented for {}\".format(type(inner_model))) # TODO add more model\n \n def invalid_label_word(self, word):\n '''\n make sure label word is the proper start of a word\n '''\n inner_model = self.model.module if isinstance(self.model, DataParallel) else self.model\n if isinstance(inner_model, RobertaForMaskedLM):\n return (not word.startswith('Ġ'))\n elif isinstance(inner_model, BertForMaskedLM):\n return False\n else:\n raise NotImplementedError(\"not implemented for 
{}\".format(type(inner_model))) # TODO\n\n def generate(self):\n self.label_words_ids = self._find_verbalizer()\n self.label_words = [[self.post_process(word) for word in self.tokenizer.convert_ids_to_tokens(i)] for i in self.label_words_ids]\n self._show_verbalizer()\n return self.label_words\n \n def _show_verbalizer(self):\n logger.info(\"Verbalizer is {}\".format(self.label_words))\n\n\n def _find_verbalizer(self):\n logger.info(\"Finding verbalizer ...\")\n label_words = self._get_top_words()\n label_words = self._get_top_group(candidates=label_words)\n return label_words\n\n def _eval_group(self, group):\n label_logits = self.probs_buffer[:,torch.tensor(group)]\n preds = torch.argmax(label_logits, axis=-1)\n correct = torch.sum(preds == self.labels_buffer)\n return (correct / len(self.labels_buffer)).item()\n\n def _get_top_group(self, candidates: List[List[int]]):\n groups = list(itertools.product(*candidates))\n group_scores = list(map(self._eval_group, groups))\n\n # Take top-n.\n best_idx = np.argsort(-np.array(group_scores))[:self.candidate_num]\n best_groups = [groups[i] for i in best_idx]\n return best_groups\n\n \n def _get_top_words(self):\n label_words_ids = []\n for label_id in torch.unique(self.labels_buffer):\n scores = self.probs_buffer[self.labels_buffer==label_id].mean(axis=0).cpu().numpy()\n kept = []\n for i in np.argsort(-scores):\n word = self.tokenizer.convert_ids_to_tokens([i])[0]\n if self.invalid_label_word(word):\n continue\n kept.append(i)\n label_words_ids.append(kept[:self.label_word_num_per_class])\n return label_words_ids\n \n @classmethod\n def from_config(cls, config, **kwargs,):\n init_args = signature(cls.__init__).args\n _init_dict = {**convert_cfg_to_dict(config), **kwargs}\n init_dict = {key: _init_dict[key] for key in _init_dict if key in init_args}\n verbalizer_generator = cls(**init_dict)\n return verbalizer_generator\n \n def release_memory(self):\n self.model = self.model.cpu()"
] |
[
[
"torch.cat",
"torch.sum",
"torch.tensor",
"torch.vstack",
"torch.unique",
"torch.no_grad",
"numpy.argsort",
"numpy.array",
"torch.logsumexp",
"torch.argmax"
]
] |
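The verbalizer search at the end of the file above scores every combination of candidate label words by classification accuracy over the buffered mask-token probabilities, then keeps the best groups. A self-contained numeric sketch of that selection step; all probabilities and label ids below are made up:

```python
import itertools
import numpy as np
import torch

probs = torch.tensor([[0.7, 0.1, 0.1, 0.1],
                      [0.2, 0.6, 0.1, 0.1],
                      [0.1, 0.1, 0.6, 0.2],
                      [0.1, 0.2, 0.1, 0.6]])   # buffered q(word | x)
labels = torch.tensor([0, 0, 1, 1])            # gold labels
candidates = [[0, 1], [2, 3]]                  # candidate word ids per class

def eval_group(group):
    # Accuracy when class i is predicted by the probability of group[i].
    label_logits = probs[:, torch.tensor(group)]
    preds = torch.argmax(label_logits, dim=-1)
    return (torch.sum(preds == labels).float() / len(labels)).item()

groups = list(itertools.product(*candidates))
scores = list(map(eval_group, groups))
best = [groups[i] for i in np.argsort(-np.array(scores))[:2]]
print(best)   # the two best (word-for-class-0, word-for-class-1) pairs
```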
pnnl/SOSAT
|
[
"610f99e0bb80f2f5e7836e7e3b6b816e029838bb"
] |
[
"tests/constraints/test_regime_constraint.py"
] |
[
"import pytest\nfrom scipy.stats import lognorm\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom SOSAT import StressState\nfrom SOSAT.constraints import FaultingRegimeConstraint\nfrom SOSAT.constraints import SU\n\n# depth in meters\ndepth = 1228.3\n# density in kg/m^3\navg_overburden_density = 2580.0\n# pore pressure gradient in MPa/km\npore_pressure_grad = 9.955\n\npore_pressure = pore_pressure_grad * (1.0 / 1000) * depth\n\nss = StressState(depth=depth,\n avg_overburden_density=avg_overburden_density,\n pore_pressure=pore_pressure)\n\n\nfrc = FaultingRegimeConstraint(SU(w_NF=100.0, w_SS=50.0, w_TF=5.0,\n theta1=np.sqrt(2.0) * 0.5, k1=300.0,\n theta2=-np.sqrt(2.0) * 0.5, k2=300.0))\n\nss.add_constraint(frc)\n\nfig = ss.plot_posterior()\nplt.savefig(\"fault_regime_constraint_posterior.png\")\n"
] |
[
[
"numpy.sqrt",
"matplotlib.pyplot.savefig"
]
] |
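A quick check of the unit handling in the test above: the pore-pressure gradient is given in MPa/km, so the depth in metres is scaled by 1/1000 before multiplying.

```python
depth = 1228.3                 # m
pore_pressure_grad = 9.955     # MPa/km
pore_pressure = pore_pressure_grad * (depth / 1000.0)
print(round(pore_pressure, 3))   # ~12.228 MPa passed to StressState
```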
determined-ai/gpt-neox
|
[
"9d06fa3f97fec0f70df52cd721099082476c9ee5"
] |
[
"megatron/model/norms.py"
] |
[
"import torch\nfrom .fused_layer_norm import MixedFusedLayerNorm as LayerNorm\n\n# Attempt to import FusedLayerNorm from Apex\ntry:\n from apex.normalization.fused_layer_norm import FusedLayerNorm as ApexLayerNorm\n\n # Try to use FusedLayerNorm from Apex - this will trigger an error.\n _ = ApexLayerNorm(8, eps=1e-5)\n\nexcept Exception as e:\n print('WARNING: APEX is not installed, using torch.nn.LayerNorm '\n 'instead of apex.normalization.FusedLayerNorm!')\n from torch.nn import LayerNorm as ApexLayerNorm\n\ndef get_norm(neox_args):\n if neox_args.norm == \"rmsnorm\":\n norm = RMSNorm\n eps = neox_args.rms_norm_epsilon\n elif neox_args.norm == \"layernorm\":\n eps = neox_args.layernorm_epsilon\n norm = LayerNorm\n elif neox_args.norm == \"scalenorm\":\n eps = neox_args.scalenorm_epsilon\n norm = ScaleNorm\n elif neox_args.norm == \"apexlayernorm\":\n eps = neox_args.layernorm_epsilon\n norm = ApexLayerNorm\n else:\n raise ValueError(f\"norm {neox_args.norm} not recognized\")\n return norm, eps\n\nclass RMSNorm(torch.nn.Module):\n def __init__(self, dim, p=-1., eps=1e-8, bias=False):\n \"\"\"\n Root Mean Square Layer Normalization\n :param dim: model size\n :param p: partial RMSNorm, valid value [0, 1], default -1.0 (disabled)\n :param eps: epsilon value, default 1e-8\n :param bias: whether use bias term for RMSNorm, disabled by\n default because RMSNorm doesn't enforce re-centering invariance.\n \"\"\"\n super(RMSNorm, self).__init__()\n\n self.eps = eps\n self.d = dim\n self.p = p\n self.bias = bias\n\n self.scale = torch.nn.Parameter(torch.ones(dim))\n self.register_parameter(\"scale\", self.scale)\n\n if self.bias:\n self.offset = torch.nn.Parameter(torch.zeros(dim))\n self.register_parameter(\"offset\", self.offset)\n\n def forward(self, x):\n if self.p < 0. or self.p > 1.:\n norm_x = x.norm(2, dim=-1, keepdim=True)\n d_x = self.d\n else:\n partial_size = int(self.d * self.p)\n partial_x, _ = torch.split(x, [partial_size, self.d - partial_size], dim=-1)\n\n norm_x = partial_x.norm(2, dim=-1, keepdim=True)\n d_x = partial_size\n\n rms_x = norm_x * d_x ** (-1. / 2)\n x_normed = x / (rms_x + self.eps)\n\n if self.bias:\n return self.scale * x_normed + self.offset\n\n return self.scale * x_normed\n\n\nclass ScaleNorm(torch.nn.Module):\n def __init__(self, dim, eps=1e-5):\n super().__init__()\n self.g = torch.nn.Parameter(torch.ones(1))\n self.eps = eps\n\n def forward(self, x):\n n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)\n return x / n * self.g"
] |
[
[
"torch.norm",
"torch.ones",
"torch.zeros",
"torch.nn.LayerNorm",
"torch.split"
]
] |
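To see what the RMSNorm forward pass above computes in its default configuration (partial RMSNorm disabled, no bias), here is a plain-tensor sketch on random data; `scale` corresponds to the module's learned gain at initialisation:

```python
import torch

x = torch.randn(4, 8)          # (batch, dim)
dim, eps = 8, 1e-8

# Root mean square per vector: ||x||_2 / sqrt(dim).
norm_x = x.norm(2, dim=-1, keepdim=True)
rms_x = norm_x * dim ** (-0.5)
x_normed = x / (rms_x + eps)
scale = torch.ones(dim)        # learned gain, ones at init
out = scale * x_normed

# Sanity check: each row of the output now has an RMS of ~1.
print(out.pow(2).mean(dim=-1).sqrt())
```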
danielrjiang/Ax
|
[
"43014b28683b3037b5c7307869cb9b75ca31ffb6"
] |
[
"ax/models/tests/test_botorch_defaults.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nfrom unittest import mock\n\nimport torch\nfrom ax.models.torch.botorch_defaults import _get_model, get_and_fit_model\nfrom ax.utils.common.testutils import TestCase\nfrom botorch.models import FixedNoiseGP, SingleTaskGP\nfrom botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP\nfrom botorch.models.multitask import FixedNoiseMultiTaskGP, MultiTaskGP\n\n\nclass BotorchDefaultsTest(TestCase):\n def test_get_model(self):\n x = torch.zeros(2, 2)\n y = torch.zeros(2, 1)\n var = torch.zeros(2, 1)\n partial_var = torch.tensor([0, float(\"nan\")])\n unknown_var = torch.tensor([float(\"nan\"), float(\"nan\")])\n model = _get_model(x, y, unknown_var, None)\n self.assertIsInstance(model, SingleTaskGP)\n\n model = _get_model(X=x, Y=y, Yvar=var)\n self.assertIsInstance(model, FixedNoiseGP)\n model = _get_model(X=x, Y=y, Yvar=unknown_var, task_feature=1)\n self.assertTrue(type(model) == MultiTaskGP) # Don't accept subclasses.\n model = _get_model(X=x, Y=y, Yvar=var, task_feature=1)\n self.assertIsInstance(model, FixedNoiseMultiTaskGP)\n with self.assertRaises(ValueError):\n model = _get_model(X=x, Y=y, Yvar=partial_var, task_feature=None)\n model = _get_model(X=x, Y=y, Yvar=var, fidelity_features=[-1])\n self.assertTrue(isinstance(model, SingleTaskMultiFidelityGP))\n with self.assertRaises(NotImplementedError):\n _get_model(X=x, Y=y, Yvar=var, task_feature=1, fidelity_features=[-1])\n\n @mock.patch(\"ax.models.torch.botorch_defaults._get_model\", autospec=True)\n @mock.patch(\"ax.models.torch.botorch_defaults.ModelListGP\", autospec=True)\n def test_task_feature(self, gp_mock, get_model_mock):\n x = [torch.zeros(2, 2)]\n y = [torch.zeros(2, 1)]\n yvars = [torch.ones(2, 1)]\n get_and_fit_model(\n Xs=x,\n Ys=y,\n Yvars=yvars,\n task_features=[1],\n fidelity_features=[],\n state_dict=[],\n refit_model=False,\n )\n # Check that task feature was correctly passed to _get_model\n self.assertEqual(get_model_mock.mock_calls[0][2][\"task_feature\"], 1)\n\n # check error on multiple task features\n with self.assertRaises(NotImplementedError):\n get_and_fit_model(\n Xs=x,\n Ys=y,\n Yvars=yvars,\n task_features=[0, 1],\n fidelity_features=[],\n state_dict=[],\n refit_model=False,\n )\n\n # check error on multiple fidelity features\n with self.assertRaises(NotImplementedError):\n get_and_fit_model(\n Xs=x,\n Ys=y,\n Yvars=yvars,\n task_features=[],\n fidelity_features=[-1, -2],\n state_dict=[],\n refit_model=False,\n )\n\n # check error on botch task and fidelity feature\n with self.assertRaises(NotImplementedError):\n get_and_fit_model(\n Xs=x,\n Ys=y,\n Yvars=yvars,\n task_features=[1],\n fidelity_features=[-1],\n state_dict=[],\n refit_model=False,\n )\n"
] |
[
[
"torch.ones",
"torch.zeros"
]
] |
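The model dispatch that the first test exercises hinges on how `Yvar` encodes observation noise: an all-NaN column means the noise level is unknown (SingleTaskGP-style inference), finite values mean fixed noise (FixedNoiseGP-style). The helper below is a hypothetical illustration of that convention only, not part of Ax or BoTorch:

```python
import torch

def noise_is_known(Yvar: torch.Tensor) -> bool:
    # Hypothetical helper: treat a fully-NaN variance column as "unknown noise".
    return not torch.isnan(Yvar).all()

var = torch.zeros(2, 1)
unknown_var = torch.tensor([[float("nan")], [float("nan")]])

print(noise_is_known(var))          # True  -> fixed-noise model
print(noise_is_known(unknown_var))  # False -> infer the noise
```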
bsobhani/bluesky-widgets
|
[
"ecc1ee02d85858ca4d2976356bc9cb70a5e015d6"
] |
[
"bluesky_widgets/models/plot_builders.py"
] |
[
"import functools\nimport itertools\n\nimport numpy\n\nfrom .plot_specs import (\n FigureSpec,\n AxesSpec,\n ImageSpec,\n LineSpec,\n)\nfrom .utils import auto_label, call_or_eval, RunManager, run_is_live_and_not_completed\nfrom ..utils.dict_view import DictView\n\n\nclass Lines:\n \"\"\"\n Plot ys vs x for the last N runs.\n\n This supports plotting columns like ``\"I0\"`` but also Python\n expressions like ``\"5 * log(I0/It)\"`` and even\n ``\"my_custom_function(I0)\"``. See examples below. Consult\n :func:`bluesky_widgets.models.utils.construct_namespace` for details\n about the available variables.\n\n Parameters\n ----------\n x : String | Callable\n Field name (e.g. \"theta\") or expression (e.g. \"- deg2rad(theta) / 2\")\n or callable with expected signature::\n\n f(run: BlueskyRun) -> x: Array\n\n Other signatures are also supported to allow for a somewhat \"magical\"\n usage. See examples below, and also see\n :func:`bluesky_widgets.models.utils.call_or_eval` for details and more\n examples.\n\n ys : List[String | Callable]\n Field name (e.g. \"theta\") or expression (e.g. \"- deg2rad(theta) / 2\")\n or callable with expected signature::\n\n f(run: BlueskyRun) -> y: Array\n\n Other signatures are also supported to allow for a somewhat \"magical\"\n usage. See examples below, and also see\n :func:`bluesky_widgets.models.utils.call_or_eval` for details and more\n examples.\n\n max_runs : Integer\n Number of Runs to visualize at once. Default is 10.\n\n label_maker : Callable, optional\n Expected signature::\n\n f(run: BlueskyRun, y: String) -> label: String\n\n needs_streams : List[String], optional\n Streams referred to by x and y. Default is ``[\"primary\"]``\n namespace : Dict, optional\n Inject additional tokens to be used in expressions for x and y\n axes : AxesSpec, optional\n If None, an axes and figure are created with default labels and titles\n derived from the ``x`` and ``y`` parameters.\n\n Attributes\n ----------\n max_runs : int\n Number of Runs to visualize at once. This may be changed at any point.\n (Note: Increasing it will not restore any Runs that have already been\n removed, but it will allow more new Runs to be added.) Runs added\n with ``pinned=True`` are exempt from the limit.\n runs : RunList[BlueskyRun]\n As runs are appended entries will be removed from the beginning of the\n last (first in, first out) so that there are at most ``max_runs``.\n pinned : Frozenset[String]\n Run uids of pinned runs.\n figure : FigureSpec\n axes : AxesSpec\n x : String | Callable\n Read-only access to x\n ys : Tuple[String | Callable]\n Read-only access to ys\n needs_streams : Tuple[String]\n Read-only access to stream names needed\n namespace : Dict\n Read-only access to user-provided namespace\n\n Examples\n --------\n\n Plot \"det\" vs \"motor\" and view it.\n\n >>> model = Lines(\"motor\", [\"det\"])\n >>> from bluesky_widgets.jupyter.figures import JupyterFigure\n >>> view = JupyterFigure(model.figure)\n >>> model.add_run(run)\n >>> model.add_run(another_run, pinned=True)\n\n Plot a mathematical transformation of the columns using any object in\n numpy. 
This can be given as a string expression:\n\n >>> model = Lines(\"abs(motor)\", [\"-log(det)\"])\n >>> model = Lines(\"abs(motor)\", [\"pi * det\"])\n >>> model = Lines(\"abs(motor)\", [\"sqrt(det)\"])\n\n Plot multiple lines.\n\n >>> model = Lines(\"motor\", [\"log(I0/It)\", \"log(I0)\", \"log(It)\"])\n\n Plot every tenth point.\n\n >>> model = Lines(\"motor\", [\"intesnity[::10]\"])\n\n Access data outside the \"primary\" stream, such as a stream name \"baseline\".\n\n >>> model = Lines(\"motor\", [\"intensity/baseline['intensity'][0]\"])\n\n As shown, objects from numpy can be used in expressions. You may define\n additional words, such as \"savlog\" for a Savitzky-Golay smoothing filter,\n by passing it a dict mapping the new word to the new object.\n\n >>> import scipy.signal\n >>> namespace = {\"savgol\": scipy.signal.savgol_filter}\n >>> model = Lines(\"motor\", [\"savgol(intensity, 5, 2)\"],\n ... namespace=namespace)\n\n Or you may pass in a function. It will be passed parameters according to\n their names.\n\n >>> model = Lines(\"motor\", [lambda intensity: savgol(intensity, 5, 2)])\n\n More examples of this function-based usage:\n\n >>> model = Lines(\"abs(motor)\", [lambda det: -log(det)])\n >>> model = Lines(\"abs(motor)\", [lambda det, pi: pi * det])\n >>> model = Lines(\"abs(motor)\", [lambda det, np: np.sqrt(det)])\n\n Custom, user-defined objects may be added in the same way, either by adding\n names to the namespace or providing the functions directly.\n \"\"\"\n\n def __init__(\n self,\n x,\n ys,\n *,\n max_runs=10,\n label_maker=None,\n needs_streams=(\"primary\",),\n namespace=None,\n axes=None,\n ):\n super().__init__()\n\n if label_maker is None:\n # scan_id is always generated by RunEngine but not stricter required by\n # the schema, so we fail gracefully if it is missing.\n\n if len(ys) > 1:\n\n def label_maker(run, y):\n return (\n f\"Scan {run.metadata['start'].get('scan_id', '?')} \"\n f\"{auto_label(y)}\"\n )\n\n else:\n\n def label_maker(run, y):\n return f\"Scan {run.metadata['start'].get('scan_id', '?')}\"\n\n self._x = x\n if isinstance(ys, str):\n raise ValueError(\"`ys` must be a list of strings, not a string\")\n self._ys = tuple(ys)\n self._label_maker = label_maker\n self._namespace = namespace\n if axes is None:\n axes = AxesSpec(\n x_label=auto_label(self.x),\n y_label=\", \".join(auto_label(y) for y in self.ys),\n )\n figure = FigureSpec((axes,), title=f\"{axes.y_label} v {axes.x_label}\")\n else:\n figure = axes.figure\n self.axes = axes\n self.figure = figure\n # If the Axes' figure is not yet set, listen for it to be set.\n if figure is None:\n\n def set_figure(event):\n self.figure = event.value\n # This occurs at most once, so we can now stop listening.\n self.axes.events.figure.disconnect(set_figure)\n\n self.axes.events.figure.connect(set_figure)\n\n self._color_cycle = itertools.cycle(f\"C{i}\" for i in range(10))\n\n self._run_manager = RunManager(max_runs, needs_streams)\n self._run_manager.events.run_ready.connect(self._add_lines)\n self.add_run = self._run_manager.add_run\n self.discard_run = self._run_manager.discard_run\n\n def _transform(self, run, x, y):\n return call_or_eval((x, y), run, self.needs_streams, self.namespace)\n\n def _add_lines(self, event):\n \"Add a line.\"\n run = event.run\n for y in self.ys:\n label = self._label_maker(run, y)\n # If run is in progress, give it a special color so it stands out.\n if run_is_live_and_not_completed(run):\n color = \"black\"\n\n def restyle_line_when_complete(event):\n \"When run is 
complete, update style.\"\n line.style.update({\"color\": next(self._color_cycle)})\n\n run.events.completed.connect(restyle_line_when_complete)\n else:\n color = next(self._color_cycle)\n style = {\"color\": color}\n\n # Style pinned runs differently.\n if run.metadata[\"start\"][\"uid\"] in self.pinned:\n style.update(linestyle=\"dashed\")\n label += \" (pinned)\"\n\n func = functools.partial(self._transform, x=self.x, y=y)\n line = LineSpec(func, run, label, style)\n self._run_manager.track_artist(line)\n self.axes.lines.append(line)\n\n @property\n def x(self):\n return self._x\n\n @property\n def ys(self):\n return self._ys\n\n @property\n def namespace(self):\n return DictView(self._namespace or {})\n\n # Expose some properties from the internal RunManger helper class.\n\n @property\n def runs(self):\n return self._run_manager.runs\n\n @property\n def max_runs(self):\n return self._run_manager.max_runs\n\n @max_runs.setter\n def max_runs(self, value):\n self._run_manager.max_runs = value\n\n @property\n def needs_streams(self):\n return self._run_manager._needs_streams\n\n @property\n def pinned(self):\n return self._run_manager._pinned\n\n\nclass Images:\n \"\"\"\n Plot an image from a Run.\n\n By default, higher-dimensional data is handled by repeatedly averaging over\n the leading dimension until there are only two dimensions.\n\n Parameters\n ----------\n\n field : string\n Field name or expression\n max_runs : Integer\n Number of Runs to visualize at once. Default is 1.\n label_maker : Callable, optional\n Expected signature::\n\n f(run: BlueskyRun, y: String) -> label: String\n\n needs_streams : List[String], optional\n Streams referred to by field. Default is ``[\"primary\"]``\n namespace : Dict, optional\n Inject additional tokens to be used in expressions for x and y\n axes : AxesSpec, optional\n If None, an axes and figure are created with default labels and titles\n derived from the ``x`` and ``y`` parameters.\n\n Attributes\n ----------\n max_runs : int\n Number of Runs to visualize at once. This may be changed at any point.\n (Note: Increasing it will not restore any Runs that have already been\n removed, but it will allow more new Runs to be added.) 
Runs added\n with ``pinned=True`` are exempt from the limit.\n runs : RunList[BlueskyRun]\n As runs are appended entries will be removed from the beginning of the\n last (first in, first out) so that there are at most ``max_runs``.\n pinned : Frozenset[String]\n Run uids of pinned runs.\n figure : FigureSpec\n axes : AxesSpec\n field : String\n Read-only access to field or expression\n needs_streams : List[String], optional\n Read-only access to streams referred to by field.\n namespace : Dict, optional\n Read-only access to user-provided namespace\n\n Examples\n --------\n >>> model = Images(\"ccd\")\n >>> from bluesky_widgets.jupyter.figures import JupyterFigure\n >>> view = JupyterFigure(model.figure)\n >>> model.add_run(run)\n \"\"\"\n\n # TODO: fix x and y limits here\n\n def __init__(\n self,\n field,\n *,\n max_runs=1,\n label_maker=None,\n needs_streams=(\"primary\",),\n namespace=None,\n axes=None,\n ):\n super().__init__()\n\n if label_maker is None:\n # scan_id is always generated by RunEngine but not stricter required by\n # the schema, so we fail gracefully if it is missing.\n\n def label_maker(run, field):\n md = run.metadata[\"start\"]\n return (\n f\"Scan ID {md.get('scan_id', '?')} UID {md['uid'][:8]} \"\n f\"{auto_label(field)}\"\n )\n\n self._field = field\n self._label_maker = label_maker\n self._namespace = namespace\n if axes is None:\n axes = AxesSpec()\n figure = FigureSpec((axes,), title=\"\")\n else:\n figure = axes.figure\n self.axes = axes\n self.figure = figure\n # If the Axes' figure is not yet set, listen for it to be set.\n if figure is None:\n\n def set_figure(event):\n self.figure = event.value\n # This occurs at most once, so we can now stop listening.\n self.axes.events.figure.disconnect(set_figure)\n\n self.axes.events.figure.connect(set_figure)\n\n self._run_manager = RunManager(max_runs, needs_streams)\n self._run_manager.events.run_ready.connect(self._add_images)\n self.add_run = self._run_manager.add_run\n self.discard_run = self._run_manager.discard_run\n\n def _add_images(self, event):\n run = event.run\n func = functools.partial(self._transform, field=self.field)\n image = ImageSpec(func, run, label=self.field)\n array_shape = run.primary.read()[self.field].shape\n self._run_manager.track_artist(image)\n self.axes.images.append(image)\n self.axes.title = self._label_maker(run, self.field)\n # By default, pixels center on integer coordinates ranging from 0 to\n # columns-1 horizontally and 0 to rows-1 vertically.\n # In order to see entire pixels, we set lower limits to -0.5\n # and upper limits to columns-0.5 horizontally and rows-0.5 vertically\n # if limits aren't specifically set.\n if self.axes.x_limits is None:\n self.axes.x_limits = (-0.5, array_shape[-1] - 0.5)\n if self.axes.y_limits is None:\n self.axes.y_limits = (-0.5, array_shape[-2] - 0.5)\n # TODO Set axes x, y from xarray dims\n\n def _transform(self, run, field):\n (data,) = numpy.asarray(\n call_or_eval((field,), run, self.needs_streams, self.namespace)\n )\n # If the data is more than 2D, take the middle slice from the leading\n # axis until there are only two axes.\n while data.ndim > 2:\n middle = data.shape[0] // 2\n data = data[middle]\n return data\n\n @property\n def field(self):\n return self._field\n\n @property\n def namespace(self):\n return DictView(self._namespace or {})\n\n # Expose some properties from the internal RunManger helper class.\n\n @property\n def runs(self):\n return self._run_manager.runs\n\n @property\n def max_runs(self):\n return 
self._run_manager.max_runs\n\n @max_runs.setter\n def max_runs(self, value):\n self._run_manager.max_runs = value\n\n @property\n def needs_streams(self):\n return self._run_manager._needs_streams\n\n @property\n def pinned(self):\n return self._run_manager._pinned\n\n\nclass RasteredImages:\n \"\"\"\n Plot a rastered image from a Run.\n\n Parameters\n ----------\n\n field : string\n Field name or expression\n shape : Tuple[Integer]\n The (row, col) shape of the raster\n label_maker : Callable, optional\n Expected signature::\n\n f(run: BlueskyRun, y: String) -> label: String\n\n needs_streams : List[String], optional\n Streams referred to by field. Default is ``[\"primary\"]``\n namespace : Dict, optional\n Inject additional tokens to be used in expressions for x and y\n axes : AxesSpec, optional\n If None, an axes and figure are created with default labels and titles\n derived from the ``x`` and ``y`` parameters.\n clim : Tuple, optional\n The color limits\n cmap : String or Colormap, optional\n The color map to use\n extent : scalars (left, right, bottom, top), optional\n Passed through to :meth:`matplotlib.axes.Axes.imshow`\n x_positive : String, optional\n Defines the positive direction of the x axis, takes the values 'right'\n (default) or 'left'.\n y_positive : String, optional\n Defines the positive direction of the y axis, takes the values 'up'\n (default) or 'down'.\n\n Attributes\n ----------\n run : BlueskyRun\n The currently-viewed Run\n figure : FigureSpec\n axes : AxesSpec\n field : String\n Read-only access to field or expression\n needs_streams : List[String], optional\n Read-only access to streams referred to by field.\n namespace : Dict, optional\n Read-only access to user-provided namespace\n\n Examples\n --------\n >>> model = RasteredImages(\"intensity\", shape=(100, 200))\n >>> from bluesky_widgets.jupyter.figures import JupyterFigure\n >>> view = JupyterFigure(model.figure)\n >>> model.add_run(run)\n \"\"\"\n\n def __init__(\n self,\n field,\n shape,\n *,\n max_runs=1,\n label_maker=None,\n needs_streams=(\"primary\",),\n namespace=None,\n axes=None,\n clim=None,\n cmap=\"viridis\",\n extent=None,\n x_positive=\"right\",\n y_positive=\"up\",\n ):\n super().__init__()\n\n if label_maker is None:\n # scan_id is always generated by RunEngine but not stricter required by\n # the schema, so we fail gracefully if it is missing.\n\n def label_maker(run, field):\n md = run.metadata[\"start\"]\n return (\n f\"Scan ID {md.get('scan_id', '?')} UID {md['uid'][:8]} {field}\"\n )\n\n self._label_maker = label_maker\n\n # Stash these and expose them as read-only properties.\n self._field = field\n self._shape = shape\n self._namespace = namespace\n\n self._run = None\n\n if axes is None:\n axes = AxesSpec()\n figure = FigureSpec((axes,), title=\"\")\n else:\n figure = axes.figure\n self.axes = axes\n self.figure = figure\n # If the Axes' figure is not yet set, listen for it to be set.\n if figure is None:\n\n def set_figure(event):\n self.figure = event.value\n # This occurs at most once, so we can now stop listening.\n self.axes.events.figure.disconnect(set_figure)\n\n self.axes.events.figure.connect(set_figure)\n self._clim = clim\n self._cmap = cmap\n self._extent = extent\n self._x_positive = x_positive\n self._y_positive = y_positive\n\n self._run_manager = RunManager(max_runs, needs_streams)\n self._run_manager.events.run_ready.connect(self._add_image)\n self.add_run = self._run_manager.add_run\n self.discard_run = self._run_manager.discard_run\n\n @property\n def 
cmap(self):\n return self._cmap\n\n @cmap.setter\n def cmap(self, value):\n self._cmap = value\n for image in self.axes.images:\n image.style.update({\"cmap\": value})\n\n @property\n def clim(self):\n return self._clim\n\n @clim.setter\n def clim(self, value):\n self._clim = value\n for image in self.axes.images:\n image.style.update({\"clim\": value})\n\n @property\n def extent(self):\n return self._extent\n\n @extent.setter\n def extent(self, value):\n self._extent = value\n for image in self.axes.images:\n image.style.update({\"extent\": value})\n\n @property\n def x_positive(self):\n xmin, xmax = self.axes.x_limits\n if xmin > xmax:\n self._x_positive = \"left\"\n else:\n self._x_positive = \"right\"\n return self._x_positive\n\n @x_positive.setter\n def x_positive(self, value):\n if value not in [\"right\", \"left\"]:\n raise ValueError('x_positive must be \"right\" or \"left\"')\n self._x_positive = value\n xmin, xmax = self.axes.x_limits\n if (xmin > xmax and self._x_positive == \"right\") or (\n xmax > xmin and self._x_positive == \"left\"\n ):\n self.axes.x_limits = (xmax, xmin)\n elif (xmax >= xmin and self._x_positive == \"right\") or (\n xmin >= xmax and self._x_positive == \"left\"\n ):\n self.axes.x_limits = (xmin, xmax)\n self._x_positive = value\n\n @property\n def y_positive(self):\n ymin, ymax = self.axes.y_limits\n if ymin > ymax:\n self._y_positive = \"down\"\n else:\n self._y_positive = \"up\"\n return self._y_positive\n\n @y_positive.setter\n def y_positive(self, value):\n if value not in [\"up\", \"down\"]:\n raise ValueError('y_positive must be \"up\" or \"down\"')\n self._y_positive = value\n ymin, ymax = self.axes.y_limits\n if (ymin > ymax and self._y_positive == \"up\") or (\n ymax > ymin and self._y_positive == \"down\"\n ):\n self.axes.y_limits = (ymax, ymin)\n elif (ymax >= ymin and self._y_positive == \"up\") or (\n ymin >= ymax and self._y_positive == \"down\"\n ):\n self.axes.y_limits = (ymin, ymax)\n self._y_positive = value\n\n def _add_image(self, event):\n run = event.run\n func = functools.partial(self._transform, field=self.field)\n style = {\"cmap\": self._cmap, \"clim\": self._clim, \"extent\": self._extent}\n image = ImageSpec(func, run, label=self.field, style=style)\n self._run_manager.track_artist(image)\n md = run.metadata[\"start\"]\n self.axes.images.append(image)\n self.axes.title = self._label_maker(run, self.field)\n self.axes.x_label = md[\"motors\"][1]\n self.axes.y_label = md[\"motors\"][0]\n # By default, pixels center on integer coordinates ranging from 0 to\n # columns-1 horizontally and 0 to rows-1 vertically.\n # In order to see entire pixels, we set lower limits to -0.5\n # and upper limits to columns-0.5 horizontally and rows-0.5 vertically\n # if limits aren't specifically set.\n if self.axes.x_limits is None and self._x_positive == \"right\":\n self.axes.x_limits = (-0.5, md[\"shape\"][1] - 0.5)\n elif self.axes.x_limits is None and self._x_positive == \"left\":\n self.axes.x_limits = (md[\"shape\"][1] - 0.5, -0.5)\n if self.axes.y_limits is None and self._y_positive == \"up\":\n self.axes.y_limits = (-0.5, md[\"shape\"][0] - 0.5)\n elif self.axes.y_limits is None and self._y_positive == \"down\":\n self.axes.y_limits = (md[\"shape\"][0] - 0.5, -0.5)\n # TODO Try to make the axes aspect equal unless the extent is highly non-square.\n ...\n\n def _transform(self, run, field):\n image_data = numpy.ones(self._shape) * numpy.nan\n (data,) = numpy.asarray(\n call_or_eval((field,), run, self.needs_streams, self.namespace)\n )\n 
snaking = run.metadata[\"start\"][\"snaking\"]\n for index in range(len(data)):\n pos = list(numpy.unravel_index(index, self._shape))\n if snaking[1] and (pos[0] % 2):\n pos[1] = self._shape[1] - pos[1] - 1\n pos = tuple(pos)\n image_data[pos] = data[index]\n\n return image_data\n\n @property\n def namespace(self):\n return DictView(self._namespace or {})\n\n @property\n def field(self):\n return self._field\n\n @property\n def shape(self):\n return self._shape\n\n # Expose some properties from the internal RunManger helper class.\n\n @property\n def runs(self):\n return self._run_manager.runs\n\n @property\n def max_runs(self):\n return self._run_manager.max_runs\n\n @max_runs.setter\n def max_runs(self, value):\n self._run_manager.max_runs = value\n\n @property\n def needs_streams(self):\n return self._run_manager._needs_streams\n\n @property\n def pinned(self):\n return self._run_manager._pinned\n"
] |
[
[
"numpy.unravel_index",
"numpy.ones"
]
] |
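A minimal illustrative sketch (not part of the row above) of the two numpy calls this row's apis column lists, numpy.ones and numpy.unravel_index, in the role the rastered-image transform gives them: rebuilding a 2-D raster from a flat sequence of readings. The shape and values are invented for the example, and the snake-pattern flip from the original file is omitted.

    import numpy as np

    shape = (3, 4)                       # (rows, cols) of the raster grid (assumed)
    data = np.arange(10, dtype=float)    # flat readings, possibly incomplete

    image = np.ones(shape) * np.nan      # unvisited pixels stay NaN
    for index, value in enumerate(data):
        row, col = np.unravel_index(index, shape)  # flat index -> (row, col)
        image[row, col] = value

    print(image)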
gefux/OQuPy
|
[
"764528fb6181ea62f8829a9e0a9e3faa2af71e1f"
] |
[
"tests/physics/example_C_test.py"
] |
[
"# Copyright 2020 The TEMPO Collaboration\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTests for the time_evovling_mpo.backends.tensor_network modules.\n\"\"\"\n\nimport pytest\nimport numpy as np\n\nimport oqupy\nfrom oqupy import process_tensor\n\n# -----------------------------------------------------------------------------\n# -- Test C: Spin-1 boson model -------------------------------------------------\n\n# Initial state:\ninitial_state_C = np.array([[0.0,0.0,0.0],\n [0.0,1.0,0.0],\n [0.0,0.0,0.0]])\n\n# System operator\nh_sys_C = np.array([[1.0,0.0,0.0],\n [0.0,0.5,0.0],\n [0.0,0.0,0.0]])\nh_sys_C += np.array([[0.0,0.5,0.0],\n [0.5,0.0,0.5],\n [0.0,0.5,0.0]])\n\n# Markovian dissipation\ngamma_C_1 = 0.1\ngamma_C_2 = 0.2\nlindblad_operators_C_1 = np.array([[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0]])\nlindblad_operators_C_2 = np.array([[0.5, 0.0, 0.0],\n [0.0,-0.5, 0.0],\n [0.0, 0.0, 0.0]])\n\n# Ohmic spectral density with exponential cutoff\ncoupling_operator_C = np.array([[0.5, 0.0, 0.0],\n [0.0,-0.5, 0.0],\n [0.0, 0.0, 0.0]])\nalpha_C = 0.3\ncutoff_C = 5.0\ntemperature_C = 0.0\n\n# end time\nt_end_C = 1.0\n\n# result obtained with release code (made hermitian, dkmax=10):\nrho_C = np.array(\n [[ 0.12576653+0.j ,-0.11739956-0.14312036j, 0.12211454-0.05963583j],\n [-0.11739956+0.14312036j, 0.61315893+0.j ,-0.06636825+0.26917271j],\n [ 0.12211454+0.05963583j,-0.06636825-0.26917271j, 0.26107455+0.j ]])\n\ncorrelations_C = oqupy.PowerLawSD(alpha=alpha_C,\n zeta=1.0,\n cutoff=cutoff_C,\n cutoff_type=\"exponential\",\n temperature=temperature_C,\n name=\"ohmic\")\nbath_C = oqupy.Bath(coupling_operator_C,\n correlations_C,\n name=\"phonon bath\")\nsystem_C = oqupy.System(h_sys_C,\n gammas=[gamma_C_1, gamma_C_2],\n lindblad_operators=[lindblad_operators_C_1,\n lindblad_operators_C_2])\n\n\n# -----------------------------------------------------------------------------\n\ndef test_tensor_network_tempo_backend_C():\n tempo_params_C = oqupy.TempoParameters(\n dt=0.05,\n dkmax=10,\n epsrel=10**(-7),\n add_correlation_time=None)\n tempo_C = oqupy.Tempo(system_C,\n bath_C,\n tempo_params_C,\n initial_state_C,\n start_time=0.0)\n tempo_C.compute(end_time=1.0)\n dyn_C = tempo_C.get_dynamics()\n assert dyn_C.times[-1] == 1.0\n np.testing.assert_almost_equal(dyn_C.states[-1], rho_C, decimal=4)\n\n\ndef test_tensor_network_pt_tempo_backend_C():\n tempo_params_C = oqupy.TempoParameters(\n dt=0.05,\n dkmax=10,\n epsrel=10**(-7),\n add_correlation_time=None)\n pt = oqupy.pt_tempo_compute(\n bath_C,\n start_time=0.0,\n end_time=1.0,\n parameters=tempo_params_C)\n\n dyn = oqupy.compute_dynamics(\n system=system_C,\n process_tensor=pt,\n initial_state=initial_state_C)\n assert dyn.times[-1] == 1.0\n np.testing.assert_almost_equal(dyn.states[-1], rho_C, decimal=4)\n\n# -----------------------------------------------------------------------------\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.array"
]
] |
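A small hedged sketch of the two numpy calls listed for this row, numpy.array and numpy.testing.assert_almost_equal, used as the test file uses them: building a reference matrix and comparing a computed result against it to a fixed number of decimals. The matrices below are placeholders, not OQuPy output.

    import numpy as np

    reference = np.array([[0.5, 0.0],
                          [0.0, 0.5]])     # placeholder reference state
    computed = reference + 1e-6            # stand-in for a numerically computed state

    # Passes because the two matrices agree to 4 decimal places.
    np.testing.assert_almost_equal(computed, reference, decimal=4)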
timcast725/MVCNN_Pytorch
|
[
"0ec1d9705540d6d279cf8309cad264f40325997a"
] |
[
"models/mvcnn_bottom_col.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['MVCNN_bottom', 'mvcnn_bottom']\n\n\nmodel_urls = {\n 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',\n}\n\n\nclass MVCNN_bottom(nn.Module):\n\n def __init__(self, num_classes=1000):\n super(MVCNN_bottom, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=6, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=3),\n nn.Conv2d(64, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=3),\n )\n self.classifier = nn.Sequential(\n nn.Linear(256 * 6 * 6, 4096),\n )\n\n def forward(self, x):\n x = x.transpose(0, 1)\n \n view_pool = []\n \n for v in x:\n v = self.features(v)\n v = v.view(v.size(0), 256 * 6 * 6)\n \n view_pool.append(v)\n \n pooled_view = view_pool[0]\n for i in range(1, len(view_pool)):\n pooled_view = torch.max(pooled_view, view_pool[i])\n \n pooled_view = self.classifier(pooled_view)\n return pooled_view\n\n\ndef mvcnn_bottom(pretrained=False, **kwargs):\n r\"\"\"MVCNN model architecture from the\n `\"Multi-view Convolutional...\" <hhttp://vis-www.cs.umass.edu/mvcnn/docs/su15mvcnn.pdf>`_ paper.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = MVCNN_bottom(**kwargs)\n if pretrained:\n pretrained_dict = model_zoo.load_url(model_urls['alexnet'])\n model_dict = model.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and v.shape == model_dict[k].shape}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict)\n # 3. load the new state dict\n model.load_state_dict(model_dict)\n return model\n"
] |
[
[
"torch.max",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] |
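A brief sketch (hypothetical tensors, not the MVCNN_bottom module above) of the element-wise view pooling that the listed torch.max call performs in the forward pass: per-view feature vectors are reduced to one pooled vector by taking the element-wise maximum across views.

    import torch

    views = [torch.randn(8, 4096) for _ in range(12)]  # 12 views, batch of 8 (assumed sizes)

    pooled = views[0]
    for v in views[1:]:
        pooled = torch.max(pooled, v)    # element-wise max across views

    print(pooled.shape)                  # torch.Size([8, 4096])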