Dataset schema: repo_name (string, 6-130 chars), hexsha (list), file_path (list), code (list), apis (list)
tylerwmarrs/seglearn
[ "01fa65b3e326091671043f30c0310eead0e78bc2" ]
[ "seglearn/tests/test_pipe.py" ]
[ "# Author: David Burns\n# License: BSD\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_validate\n\nfrom seglearn.pipe import Pype\nfrom seglearn.transform import FeatureRep, SegmentX, SegmentXY, SegmentXYForecast, PadTrunc\nfrom seglearn.base import TS_Data\n\n\ndef yvals(y):\n if len(np.atleast_1d(y[0])) > 1:\n return np.unique(np.concatenate(y))\n else:\n return np.unique(y)\n\n\ndef transformation_test(clf, X, y):\n clf.fit(X, y)\n Xtr1, ytr1 = clf.transform(X, y)\n Xtr2, ytr2 = clf.fit_transform(X, y)\n assert np.all(Xtr1 == Xtr2)\n assert np.all(ytr1 == ytr2)\n assert np.all(np.isin(np.unique(ytr1), yvals(y)))\n assert len(Xtr1) == len(ytr1)\n\n\ndef test_pipe_transformation():\n # SegmentX transform pipe\n pipe = Pype([('seg', SegmentX()),\n ('ftr', FeatureRep()),\n ('scaler', StandardScaler())])\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [1, 2, 3]\n transformation_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n transformation_test(pipe, X, y)\n\n # SegmentXY transform pipe\n pipe = Pype([('seg', SegmentXY()),\n ('ftr', FeatureRep()),\n ('scaler', StandardScaler())])\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [np.random.rand(1000), np.random.rand(100), np.random.rand(500)]\n transformation_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n transformation_test(pipe, X, y)\n\n # Forecast transform pipe\n pipe = Pype([('seg', SegmentXYForecast()),\n ('ftr', FeatureRep()),\n ('scaler', StandardScaler())])\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [np.random.rand(1000), np.random.rand(100), np.random.rand(500)]\n transformation_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n transformation_test(pipe, X, y)\n\n # Padtrunc transform pipe\n pipe = Pype([('trunc', PadTrunc()),\n ('ftr', FeatureRep()),\n ('scaler', StandardScaler())])\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [1, 2, 3]\n transformation_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n transformation_test(pipe, X, y)\n\n\ndef classifier_test(clf, X, y):\n yv = yvals(y)\n clf.fit(X, y)\n yp = clf.predict(X)\n ytr, yp2 = clf.transform_predict(X, y)\n assert np.all(np.isin(np.unique(ytr), yv))\n assert len(ytr) == len(yp2)\n assert np.all(np.isin(np.unique(yp2), yv))\n assert np.all(yp == yp2)\n pp = clf.predict_proba(X)\n assert pp.shape[0] == len(yp)\n assert pp.shape[1] == len(yv)\n score = clf.score(X, y)\n assert score <= 1.0 and score >= 0.0\n\n\ndef test_pipe_classification():\n # no context data, single time series\n X = [np.random.rand(1000, 10)]\n y = [5]\n\n pipe = Pype([('seg', SegmentX()),\n ('ftr', FeatureRep()),\n ('rf', RandomForestClassifier(n_estimators=10))])\n\n classifier_test(pipe, X, y)\n\n # context data, single time seres\n Xt = [np.random.rand(1000, 10)]\n Xc = [np.random.rand(3)]\n X = TS_Data(Xt, Xc)\n y = [5]\n classifier_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n 
X = TS_Data.from_df(X)\n classifier_test(pipe, X, y)\n\n # multiple time series\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [1, 2, 3]\n classifier_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n classifier_test(pipe, X, y)\n\n # univariate data\n Xt = [np.random.rand(1000), np.random.rand(100), np.random.rand(500)]\n Xc = np.random.rand(3)\n X = TS_Data(Xt, Xc)\n y = [1, 2, 3]\n classifier_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n classifier_test(pipe, X, y)\n\n\ndef regression_test(clf, X, y):\n yv = yvals(y)\n clf.fit(X, y)\n yp = clf.predict(X)\n ytr, yp2 = clf.transform_predict(X, y)\n assert np.all(np.isin(np.unique(ytr), yv))\n assert len(ytr) == len(yp2)\n assert np.all(yp == yp2)\n score = clf.score(X, y)\n assert score <= 1.0 and score >= 0.0\n\n\ndef test_pipe_regression():\n # no context data, single time series\n X = [np.random.rand(1000, 10)]\n y = [np.random.rand(1000)]\n pipe = Pype([('seg', SegmentXY()),\n ('ftr', FeatureRep()),\n ('ridge', Ridge())])\n regression_test(pipe, X, y)\n\n # context data, single time seres\n Xt = [np.random.rand(1000, 10)]\n Xc = [np.random.rand(3)]\n X = TS_Data(Xt, Xc)\n y = [np.random.rand(1000)]\n regression_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n regression_test(pipe, X, y)\n\n # multiple time seres\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [np.random.rand(1000), np.random.rand(100), np.random.rand(500)]\n regression_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n regression_test(pipe, X, y)\n\n # cross val\n Xt = np.array([np.random.rand(1000, 10)] * 5)\n Xc = np.random.rand(5, 3)\n X = TS_Data(Xt, Xc)\n y = np.array([np.random.rand(1000)] * 5)\n cross_validate(pipe, X, y, cv=3)\n\n X = pd.DataFrame(Xc)\n Xt = [np.random.rand(1000, 10)] * 5\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n\n cross_validate(pipe, X, y, cv=3)\n\n\ndef forecast_test(clf, X, y):\n yv = yvals(y)\n clf.fit(X, y)\n yp = clf.predict(X)\n ytr, yp2 = clf.transform_predict(X, y)\n assert np.all(np.isin(np.unique(ytr), yv))\n assert len(ytr) == len(yp2)\n assert np.all(yp == yp2)\n score = clf.score(X, y)\n assert score <= 1.0 and score >= 0.0\n\n\ndef test_pipe_forecast():\n # no context data, single time series\n X = [np.random.rand(1000, 10)]\n y = [np.random.rand(1000)]\n\n pipe = Pype([('seg', SegmentXYForecast()),\n ('ftr', FeatureRep()),\n ('ridge', Ridge())])\n\n forecast_test(pipe, X, y)\n\n # context data, single time seres\n Xt = [np.random.rand(1000, 10)]\n Xc = [np.random.rand(3)]\n X = TS_Data(Xt, Xc)\n y = [np.random.rand(1000)]\n\n forecast_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n forecast_test(pipe, X, y)\n\n # multiple time seres\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [np.random.rand(1000), np.random.rand(100), np.random.rand(500)]\n\n forecast_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n forecast_test(pipe, X, y)\n\n # cross val\n\n Xt = np.array([np.random.rand(1000, 10)] * 5)\n Xc = np.random.rand(5, 3)\n X = TS_Data(Xt, Xc)\n y = np.array([np.random.rand(1000)] * 5)\n\n cross_validate(pipe, X, 
y, cv=3)\n\n X = pd.DataFrame(Xc)\n Xt = [np.random.rand(1000, 10)] * 5\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n cross_validate(pipe, X, y, cv=3)\n\n\ndef test_pipe_PadTrunc():\n # no context data, single time series\n X = [np.random.rand(1000, 10)]\n y = [5]\n pipe = Pype([('trunc', PadTrunc()),\n ('ftr', FeatureRep()),\n ('rf', RandomForestClassifier(n_estimators=10))])\n classifier_test(pipe, X, y)\n\n # context data, single time seres\n Xt = [np.random.rand(1000, 10)]\n Xc = [np.random.rand(3)]\n X = TS_Data(Xt, Xc)\n y = [5]\n classifier_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n classifier_test(pipe, X, y)\n\n # multiple time series\n Xt = [np.random.rand(1000, 10), np.random.rand(100, 10), np.random.rand(500, 10)]\n Xc = np.random.rand(3, 3)\n X = TS_Data(Xt, Xc)\n y = [1, 2, 3]\n classifier_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n classifier_test(pipe, X, y)\n\n # univariate data\n Xt = [np.random.rand(1000), np.random.rand(100), np.random.rand(500)]\n Xc = np.random.rand(3)\n X = TS_Data(Xt, Xc)\n y = [1, 2, 3]\n classifier_test(pipe, X, y)\n\n X = pd.DataFrame(Xc)\n X['ts_data'] = Xt\n X = TS_Data.from_df(X)\n classifier_test(pipe, X, y)\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier", "numpy.unique", "pandas.DataFrame", "numpy.all", "numpy.atleast_1d", "numpy.concatenate", "sklearn.linear_model.Ridge", "numpy.random.rand", "sklearn.model_selection.cross_validate", "sklearn.preprocessing.StandardScaler" ] ]
csalt-research/OpenASR-py
[ "9aea6753689d87d321260d7eb0ea0544e1b3403a" ]
[ "data/dataloaders.py" ]
[ "import torch\nimport glob\nimport os\nfrom utils.logging import logger\nimport numpy as np\nimport random\nfrom itertools import islice\n\ndef chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]\n\n\"\"\"\nSimple dataloader to cycle through a list of shard paths.\n\"\"\"\nclass ShardedDataLoader(object):\n def __init__(self,\n shard_root_dir,\n batch_size,\n bucket_size,\n padding_idx=0,\n mode='train',\n repeat=True\n ):\n # Collect all shard paths\n shard_paths = glob.glob(\n '%s/**/%s.*.pt' % (shard_root_dir, mode), \n recursive=True)\n self.shard_paths = sorted(shard_paths)\n\n # Store parameters\n self.batch_size = batch_size\n self.bucket_size = bucket_size\n self.padding_idx = padding_idx\n self.mode = mode\n self.repeat = repeat\n\n # Load first shard\n self.pointer = 0\n self.load_next_shard()\n\n def load_next_shard(self):\n path = self.shard_paths[self.pointer]\n self.pointer = (self.pointer + 1) % len(self.shard_paths)\n\n self.shard = torch.load(path)\n n_examples = len(self.shard['src'])\n idx = np.arange(n_examples).tolist()\n self.order = np.concatenate([random.sample(x, len(x)) for x in chunks(idx, self.bucket_size)])\n logger.info('Loaded %d examples from %s' % (n_examples, path))\n\n def pad(self, lst, max_len=None, sort=False):\n \"\"\"\n Pad given list of sequences.\n lst - list of sequences with shape [T x D]\n max_len - length upto which each sequence is to be padded\n sort - whether to sort by descending order of lengths\n \"\"\"\n lengths = np.asarray([x.shape[0] for x in lst])\n T = max_len if max_len is not None else max(lengths)\n B = len(lst)\n D = lst[0].shape[1]\n ret = np.full([T, B, D], self.padding_idx)\n for i, x in enumerate(lst):\n ret[:x.shape[0], i, :] = x\n if sort:\n order = np.argsort(lengths)[::-1]\n return ret[:, order, :], lengths[order], order\n else:\n return ret, lengths\n\n def __iter__(self):\n while True:\n current_batch = self.order[:self.batch_size]\n self.order = self.order[self.batch_size:]\n\n src = self.shard['src'][current_batch]\n tgt = self.shard['indices'][current_batch]\n feat = self.shard['feats'] \n\n src, src_len, idx = self.pad(src, sort=True)\n tgt, tgt_len = self.pad(tgt)\n tgt, tgt_len = tgt[:, idx], tgt_len[idx]\n \n # print(src.shape, src_len.shape, tgt.shape, tgt_len.shape)\n if feat['accent'] == 0: \n yield torch.FloatTensor(src), torch.LongTensor(src_len), \\\n torch.LongTensor(tgt), torch.LongTensor(tgt_len), feat\n\n if self.order.size == 0:\n if self.pointer == 0 and not self.repeat:\n self.load_next_shard()\n break\n else:\n self.load_next_shard()\n" ]
[ [ "torch.LongTensor", "torch.load", "numpy.asarray", "numpy.arange", "numpy.full", "torch.FloatTensor", "numpy.argsort" ] ]
MashmallowWoR/ADef-master
[ "1b0bd40cb3b0d555669f2c728bfb0c0b61b5dc04" ]
[ "demo_imagenet.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport PIL.Image as Image\n\nimport torch\nfrom torch.autograd import Variable\nimport torchvision.models as models\nimport torchvision.transforms as transforms\n\nfrom deformation import ADef\nfrom vector_fields import draw_vector_field\n\npath_to_resources = 'resources/'\n\nWords = open(os.path.join(path_to_resources + 'synset_words.txt'), 'r').read().split('\\n')\n\n# Image is WIDTH x WIDTH x 3\nWIDTH = 299\n\n#net = models.resnet101(pretrained=True) # Use WIDTH = 224\nnet = models.inception_v3(pretrained=True) # Use WIDTH = 299\nnet.eval() # Turn off dropout and such\nprint('Model: ' + type(net).__name__)\n\n# Image with min(width,height) >= WIDTH.\nim_name = path_to_resources + 'im0.jpg'\nimage_PIL = Image.open(im_name)\nprint('Image: ' + im_name)\n\n# Create tensor compatible with 'net'\nmean = np.array([0.485, 0.456, 0.406])\nstd = np.array([0.229, 0.224, 0.225])\nnormalize = transforms.Normalize( mean=mean, std=std )\nimage = transforms.Compose([\n transforms.Resize(WIDTH),\n transforms.CenterCrop(WIDTH),\n transforms.ToTensor(),\n normalize\n ])(image_PIL)\n\n# 'net' accepts batches of images as Variable.\nx = Variable( torch.unsqueeze( image, 0 ) )\nFx = net(x)\nmaxval, label = torch.max( Fx.data, 1 )\nlabel = label.item()\n\n# ADef config\ncandidates = 1\nmax_iter = 100\nmax_norm = np.inf\nsigma = 1\novershoot = 1.1\nstrong_targets = False\n\n# Deform image using ADef\ndef_image, def_data = ADef( image, net, ind_candidates=candidates,\n max_norm=max_norm, max_iter=max_iter,\n smooth=sigma, overshoot=overshoot,\n targeting=strong_targets )\n\ndef_image = def_image[0]\ndef_label = def_data['deformed_labels'][0]\nvec_field = def_data['vector_fields'][0]\n\n\n# Get plottable images\nunnormalize = transforms.Compose([\n transforms.Normalize( [0,0,0], 1/std ),\n transforms.Normalize( -mean, [1,1,1] )\n ])\ndef_image_PIL = transforms.Compose([\n unnormalize,\n transforms.ToPILImage()\n ])( def_image )\nimage_PIL = transforms.Compose([\n unnormalize,\n transforms.ToPILImage()\n ])( image )\n\n\n# Size of perturbation:\npertl2 = np.linalg.norm( image.numpy().ravel() - def_image.numpy().ravel(), ord=2 )\npertlinf = np.linalg.norm( image.numpy().ravel() - def_image.numpy().ravel(), ord=np.inf )\n# Size of vector field:\nvecnorms = np.sqrt( vec_field[:,:,0]**2 + vec_field[:,:,1]**2 ).numpy()\nvfl2 = np.linalg.norm( vecnorms.ravel(), ord=2 )\nvfT = np.linalg.norm( vecnorms.ravel(), ord=np.inf )\n\n\n# Plot results\nfig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2)\nfig.set_size_inches(7,8)\n\nax1.imshow( image_PIL )\nax2.imshow( def_image_PIL )\nbg_image = np.array(def_image_PIL)//2 + 128\nax3.imshow( bg_image )\ndraw_vector_field( ax3, vec_field, amp=4, tol=0.01 )\npert = 1.*np.array(def_image_PIL) - 1.*np.array(image_PIL)\nax4.imshow( pert )\n\noriginal_title = Words[label][10:].split(',')[0]\ndeformed_title = Words[def_label][10:].split(',')[0]\nax1.set_title( 'Original: ' + original_title )\nax2.set_title( 'Deformed: ' + deformed_title )\nax3.set_title( 'Vector field' )\nax4.set_title( 'Perturbation' )\n\nax3.set_xlabel(r'$\\ell^2$-norm: %.3f, $T$-norm: %.3f' %( vfl2, vfT ) )\nax4.set_xlabel(r'$\\ell^2$-norm: %.3f, $\\ell^\\infty$-norm: %.3f' %( pertl2, pertlinf ) )\n\nplt.show()\n" ]
[ [ "torch.max", "numpy.sqrt", "matplotlib.pyplot.subplots", "torch.unsqueeze", "numpy.array", "matplotlib.pyplot.show" ] ]
AnglewoodJack/data_science_champ_rtsoft_case
[ "e3951115765254df5e04543abbb4edb0749d6fe0" ]
[ "predict.py" ]
[ "def predict(file_list, lock_period=None):\n\t'''\n\n\t'''\n\t# ---------------------------------------------IMPORT-PACKAGES------------------------------------------------------\n\timport pickle\n\timport pandas as pd\n\timport numpy as np\n\tfrom preparation import data_preparation\n\tfrom datetime import timedelta\n\n\t# -----------------------------------------------LOAD-DATA----------------------------------------------------------\n\t# load previous dataset\n\twith open('model.pickle', 'rb') as fp:\n\t\tdata, xgb, gbr = pickle.load(fp)\n\n\t# load update\n\tupdate = data_preparation(file_list, lock_period, impute=False)\n\n\t# concatenate datasets\n\tdata = data.append(update)\n\n\t# ------------------------------------------MAKE-TEST-DATASET-------------------------------------------------------\n\n\t# make time period for the next day\n\tend_date = (data.index[-1] + timedelta(hours=24)).strftime('%Y-%m-%d %H:%M')\n\tstart_date = (data.index[-1] + timedelta(hours=1)).strftime('%Y-%m-%d %H:%M')\n\ttemp = pd.DataFrame(pd.date_range(start=start_date,end=end_date, freq='H'), columns=['time'])\n\t# convert to pandas datetime format\n\ttemp = temp.apply(lambda row: pd.to_datetime(row), axis=0).set_index('time')\n\n\t# concatenate datasets\n\tdf = data.append(temp)\n\t# shift data by 24 hour for prediction\n\tdf.iloc[:, 1:] = df.iloc[:, 1:].shift(24)\n\t# start date index\n\tinx = df.index.get_loc(start_date)\n\t# slice out test dataset\n\tx_test = df.iloc[inx:, 1:]\n\n\t# -----------------------------------------MAKE-PREDICTION----------------------------------------------------------\n\n\ty_xgb = xgb.predict(x_test)\n\ty_gbr = gbr.predict(x_test)\n\n\ty_pred = (y_xgb + y_gbr)/2\n\n\treturn np.array(x_test.index), y_pred\n\n\n\n\n\n\n" ]
[ [ "numpy.array", "pandas.to_datetime", "pandas.date_range" ] ]
wwymak/sagemaker-deployment
[ "e2c81ab5044fc3653e133ffabb5478fb4eddc7fe" ]
[ "Project/train/train.py" ]
[ "import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n optimizer - The optimizer to use during training.\n loss_fn - The loss function used for training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0\n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n optimizer.zero_grad()\n prediction = model(batch_X)\n label = batch_y\n loss = loss_fn(prediction, label)\n loss.backward()\n optimizer.step()\n \n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n\n\nif __name__ == '__main__':\n # All of the model parameters and training parameters are sent as arguments when the script\n # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n\t# Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n\t# Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n" ]
[ [ "torch.load", "torch.manual_seed", "torch.utils.data.TensorDataset", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.nn.BCELoss", "torch.cuda.is_available", "torch.save" ] ]
dbash/umi2i_correctness
[ "34d794c95a1226db80aa07634b1cfae442b7abc8" ]
[ "synaction/detect_pose_folder.py" ]
[ "import sys\nimport glob\n\nimport tqdm\nimport imageio\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport tensorflow as tf\n\ntfl_url = 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/posenet_mobilenet_v1_100_257x257_multi_kpt_stripped.tflite'\n\ndef run_posenet(*inputs):\n tfl_path = tf.keras.utils.get_file('model.tflite', tfl_url, cache_dir='/tmp/')\n model = tf.lite.Interpreter(model_path=tfl_path)\n model.allocate_tensors()\n\n input_details = model.get_input_details()\n output_details = model.get_output_details()\n\n assert len(input_details) == len(inputs)\n\n for input, det in zip(inputs, input_details):\n model.set_tensor(det['index'], input)\n\n model.invoke()\n outputs = {det['name']: model.get_tensor(det['index']) for det in output_details}\n return outputs\n\n\ndef predict_pose(img):\n assert img.dtype == np.uint8\n assert img.ndim == 3\n\n orig_size = img.shape[:2]\n final_size = np.max(orig_size)\n xy_borders = [final_size - s for s in orig_size]\n boxed_img = tf.cast(tf.image.resize_with_pad(img, final_size, final_size), tf.uint8)\n pose_boxed = predict_pose_square_img(boxed_img)\n pose_boxed_px = pose_boxed * final_size - np.array(xy_borders)[::-1] / 2\n return pose_boxed_px\n\n\ndef predict_pose_square_img(img):\n assert img.dtype == np.uint8\n input_size, pred_stride, n_joints = 257, 32, 17\n output_size = (input_size - 1) // pred_stride + 1\n\n img_re = tf.image.resize(img, (input_size, input_size))[..., :3]\n img_re_11 = tf.cast(img_re, 'float32') / 128 - 1\n outputs = run_posenet(img_re_11[None])\n heatmap = outputs['MobilenetV1/heatmap_2/BiasAdd'][0]\n flat_ht = heatmap.reshape(output_size*output_size, n_joints)\n atgmax_idx = np.unravel_index(np.argmax(flat_ht, axis=0), (output_size, output_size))\n argmax_xy = np.vstack(atgmax_idx)[::-1].T\n offsets = outputs['MobilenetV1/offset_2/BiasAdd'][0]\n offests_x = offsets[argmax_xy[:, 1], argmax_xy[:, 0], np.arange(n_joints)]\n offests_y = offsets[argmax_xy[:, 1], argmax_xy[:, 0], n_joints + np.arange(n_joints)]\n offests_xy = np.vstack([offests_x, offests_y])[::-1].T\n positions = offests_xy + argmax_xy * pred_stride\n positions_01 = positions / input_size\n return positions_01\n\n\nif __name__ == '__main__':\n _, img_folder, out_fn = sys.argv\n \n with open(out_fn, 'w') as f:\n for img_path in tqdm.tqdm(glob.glob(img_folder)):\n img = imageio.imread(img_path)\n\n pose = predict_pose(img)\n short_idx = [0] + list(range(5, 17))\n short_pose = pose[short_idx]\n coord_str = ['%d' % x for x in np.ravel(short_pose)]\n f.write(img_path + ' ' + ' '.join(coord_str) + '\\n')\n" ]
[ [ "tensorflow.lite.Interpreter", "numpy.arange", "tensorflow.cast", "numpy.max", "tensorflow.image.resize", "tensorflow.keras.utils.get_file", "numpy.argmax", "numpy.ravel", "tensorflow.image.resize_with_pad", "numpy.array", "numpy.vstack" ] ]
cl886699/frcnn_multigpu
[ "eed28bd3eafdf43957ea66b4ab6198d7dca57385" ]
[ "detection/core/anchor/anchor_target.py" ]
[ "import tensorflow as tf\n\nfrom detection.core.bbox import geometry, transforms\nfrom detection.utils.misc import trim_zeros\n\nclass AnchorTarget(object):\n def __init__(self,\n target_means=(0., 0., 0., 0.), \n target_stds=(0.1, 0.1, 0.2, 0.2),\n num_rpn_deltas=256,\n positive_fraction=0.5,\n pos_iou_thr=0.7,\n neg_iou_thr=0.3):\n '''Compute regression and classification targets for anchors.\n \n Attributes\n ---\n target_means: [4]. Bounding box refinement mean for RPN.\n target_stds: [4]. Bounding box refinement standard deviation for RPN.\n num_rpn_deltas: int. Maximal number of Anchors per image to feed to rpn heads.\n positive_fraction: float.\n pos_iou_thr: float.\n neg_iou_thr: float.\n '''\n self.target_means = target_means\n self.target_stds = target_stds\n self.num_rpn_deltas = num_rpn_deltas\n self.positive_fraction = positive_fraction\n self.pos_iou_thr = pos_iou_thr\n self.neg_iou_thr = neg_iou_thr\n\n def build_targets(self, anchors, gt_boxes, gt_class_ids):\n '''Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n\n Args\n ---\n anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates.\n valid_flags: [batch_size, num_anchors]\n gt_boxes: [batch_size, num_gt_boxes, (y1, x1, y2, x2)] in image \n coordinates.\n gt_class_ids: [batch_size, num_gt_boxes] Integer class IDs.\n\n Returns\n ---\n rpn_labels: [batch_size, num_anchors] \n Matches between anchors and GT boxes. 1 - positive samples; 0 - negative samples; -1 - neglect\n rpn_label_weights: [batch_size, num_anchors] \n rpn_delta_targets: [batch_size, num_anchors, (dy, dx, log(dh), log(dw))] \n Anchor bbox deltas.\n rpn_delta_weights: [batch_size, num_anchors, 4]\n '''\n rpn_labels = []\n rpn_label_weights = []\n rpn_delta_targets = []\n rpn_delta_weights = []\n \n num_imgs = gt_class_ids.shape[0]\n for i in range(num_imgs):\n labels, label_weights, delta_targets, delta_weights = self._build_single_target(\n anchors, gt_boxes[i], gt_class_ids[i])\n rpn_labels.append(labels)\n rpn_label_weights.append(label_weights)\n rpn_delta_targets.append(delta_targets)\n rpn_delta_weights.append(delta_weights)\n \n rpn_labels = tf.stack(rpn_labels)\n rpn_label_weights = tf.stack(rpn_label_weights)\n rpn_delta_targets = tf.stack(rpn_delta_targets)\n rpn_delta_weights = tf.stack(rpn_delta_weights)\n \n return rpn_labels, rpn_label_weights, rpn_delta_targets, rpn_delta_weights\n\n def _build_single_target(self, anchors, gt_boxes, gt_class_ids):\n '''Compute targets per instance.\n \n Args\n ---\n anchors: [num_anchors, (y1, x1, y2, x2)]\n valid_flags: [num_anchors]\n gt_class_ids: [num_gt_boxes]\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n \n Returns\n ---\n labels: [num_anchors]\n label_weights: [num_anchors]\n delta_targets: [num_anchors, (dy, dx, log(dh), log(dw))] \n delta_weights: [num_anchors, 4]\n '''\n gt_boxes, _ = trim_zeros(gt_boxes)\n\n labels = -tf.ones(anchors.shape[0], dtype=tf.int32)\n \n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = geometry.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). 
Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n \n \n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them.\n anchor_iou_argmax = tf.argmax(overlaps, axis=1)\n anchor_iou_max = tf.reduce_max(overlaps, axis=[1])\n \n labels = tf.where(anchor_iou_max < self.neg_iou_thr, \n tf.zeros(anchors.shape[0], dtype=tf.int32), labels)\n\n # 2. Set anchors with high overlap as positive.\n labels = tf.where(anchor_iou_max >= self.pos_iou_thr, \n tf.ones(anchors.shape[0], dtype=tf.int32), labels)\n\n # 3. Set an anchor for each GT box (regardless of IoU value). \n gt_iou_argmax = tf.argmax(overlaps, axis=0)\n labels = tf.tensor_scatter_nd_update(labels, \n tf.reshape(gt_iou_argmax, (-1, 1)), \n tf.ones(gt_iou_argmax.shape, dtype=tf.int32))\n \n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = tf.where(tf.equal(labels, 1))\n extra = ids.shape.as_list()[0] - int(self.num_rpn_deltas * self.positive_fraction)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = tf.random.shuffle(ids)[:extra]\n labels = tf.tensor_scatter_nd_update(labels, \n ids, \n -tf.ones(ids.shape[0], dtype=tf.int32))\n # Same for negative proposals\n ids = tf.where(tf.equal(labels, 0))\n extra = ids.shape.as_list()[0] - (self.num_rpn_deltas -\n tf.reduce_sum(tf.cast(tf.equal(labels, 1), tf.int32)))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = tf.random.shuffle(ids)[:extra]\n labels = tf.tensor_scatter_nd_update(labels, \n ids, \n -tf.ones(ids.shape[0], dtype=tf.int32))\n \n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n\n # Compute box deltas\n gt = tf.gather(gt_boxes, anchor_iou_argmax)\n delta_targets = transforms.bbox2delta(anchors, gt, self.target_means, self.target_stds)\n\n # Compute weights\n label_weights = tf.zeros((anchors.shape[0],), dtype=tf.float32)\n delta_weights = tf.zeros((anchors.shape[0],), dtype=tf.float32)\n num_bfg = tf.where(tf.greater_equal(labels, 0)).shape[0]\n if num_bfg > 0:\n label_weights = tf.where(labels >= 0, \n tf.ones(label_weights.shape, dtype=tf.float32) / num_bfg, \n label_weights)\n\n delta_weights = tf.where(labels > 0, \n tf.ones(delta_weights.shape, dtype=tf.float32) / num_bfg, \n delta_weights)\n delta_weights = tf.tile(tf.reshape(delta_weights, (-1, 1)), [1, 4])\n \n return labels, label_weights, delta_targets, delta_weights" ]
[ [ "tensorflow.reduce_max", "tensorflow.zeros", "tensorflow.stack", "tensorflow.reshape", "tensorflow.equal", "tensorflow.ones", "tensorflow.gather", "tensorflow.random.shuffle", "tensorflow.argmax", "tensorflow.greater_equal" ] ]
Koushik0901/Swift-SRGAN
[ "cd2414f097853bd500e4aec050379b5d37b29ac6" ]
[ "swift-srgan/metric.py" ]
[ "import math\nimport torch\nimport torch.nn.functional as F\n\n\n\ndef gaussian(window_size, sigma):\n gauss = torch.Tensor([math.exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])\n return gauss / gauss.sum()\n\n\ndef create_window(window_size, channel):\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\n window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()\n return window\n\n\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\n mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)\n mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)\n\n mu1_sq = mu1.pow(2)\n mu2_sq = mu2.pow(2)\n mu1_mu2 = mu1 * mu2\n\n sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq\n sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq\n sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2\n\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))\n\n if size_average:\n return ssim_map.mean()\n else:\n return ssim_map.mean(1).mean(1).mean(1)\n\n\nclass SSIM(torch.nn.Module):\n def __init__(self, window_size=11, size_average=True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2):\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n window = create_window(self.window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n self.window = window\n self.channel = channel\n\n return _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n\n\ndef ssim(img1, img2, window_size=11, size_average=True):\n (_, channel, _, _) = img1.size()\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" ]
[ [ "torch.nn.functional.conv2d" ] ]
avuku06/G
[ "8b505691f305783d66a1976ad51748073d2e76f6" ]
[ "face_anti-spoof_challenge@CVPR_zpeng/models/fishnet.py" ]
[ "'''\nFishNet\nAuthor: Shuyang Sun\n'''\nfrom __future__ import division\nimport torch\nimport math\nfrom .fish_block import *\n\n\n__all__ = ['fish']\n\n\n\n\nclass Fish(nn.Module):\n def __init__(self, block, num_cls=2, num_down_sample=5, num_up_sample=3, trans_map=(2, 1, 0, 6, 5, 4),\n network_planes=None, num_res_blks=None, num_trans_blks=None):\n super(Fish, self).__init__()\n self.block = block\n self.trans_map = trans_map\n self.upsample = nn.Upsample(scale_factor=2)\n self.down_sample = nn.MaxPool2d(2, stride=2)\n self.num_cls = num_cls\n self.num_down = num_down_sample\n self.num_up = num_up_sample\n self.network_planes = network_planes[1:]\n self.depth = len(self.network_planes)\n self.num_trans_blks = num_trans_blks\n self.num_res_blks = num_res_blks\n self.fish = self._make_fish(network_planes[0])\n \n \n\n def _make_score(self, in_ch, out_ch=1000, has_pool=False):\n bn = nn.BatchNorm2d(in_ch)\n relu = nn.ReLU(inplace=True)\n conv_trans = nn.Conv2d(in_ch, in_ch // 2, kernel_size=1, bias=False)\n bn_out = nn.BatchNorm2d(in_ch // 2)\n conv = nn.Sequential(bn, relu, conv_trans, bn_out, relu)\n if has_pool:\n fc = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_ch // 2, out_ch, kernel_size=1, bias=True))\n else:\n fc = nn.Conv2d(in_ch // 2, out_ch, kernel_size=1, bias=True)\n return [conv, fc]\n\n def _make_se_block(self, in_ch, out_ch):\n bn = nn.BatchNorm2d(in_ch)\n sq_conv = nn.Conv2d(in_ch, out_ch // 16, kernel_size=1)\n ex_conv = nn.Conv2d(out_ch // 16, out_ch, kernel_size=1)\n return nn.Sequential(bn,\n nn.ReLU(inplace=True),\n nn.AdaptiveAvgPool2d(1),\n sq_conv,\n nn.ReLU(inplace=True),\n ex_conv,\n nn.Sigmoid())\n\n def _make_residual_block(self, inplanes, outplanes, nstage, is_up=False, k=1, dilation=1):\n layers = []\n\n if is_up:\n layers.append(self.block(inplanes, outplanes, mode='UP', dilation=dilation, k=k))\n else:\n layers.append(self.block(inplanes, outplanes, stride=1))\n for i in range(1, nstage):\n layers.append(self.block(outplanes, outplanes, stride=1, dilation=dilation))\n return nn.Sequential(*layers)\n\n def _make_stage(self, is_down_sample, inplanes, outplanes, n_blk, has_trans=True,\n has_score=False, trans_planes=0, no_sampling=False, num_trans=2, **kwargs):\n sample_block = []\n if has_score:\n sample_block.extend(self._make_score(outplanes, outplanes * 2, has_pool=False))\n\n if no_sampling or is_down_sample:\n res_block = self._make_residual_block(inplanes, outplanes, n_blk, **kwargs)\n else:\n res_block = self._make_residual_block(inplanes, outplanes, n_blk, is_up=True, **kwargs)\n\n sample_block.append(res_block)\n\n if has_trans:\n trans_in_planes = self.in_planes if trans_planes == 0 else trans_planes\n sample_block.append(self._make_residual_block(trans_in_planes, trans_in_planes, num_trans))\n\n if not no_sampling and is_down_sample:\n sample_block.append(self.down_sample)\n elif not no_sampling: # Up-Sample\n sample_block.append(self.upsample)\n\n return nn.ModuleList(sample_block)\n\n def _make_fish(self, in_planes):\n def get_trans_planes(index):\n map_id = self.trans_map[index-self.num_down-1] - 1\n p = in_planes if map_id == -1 else cated_planes[map_id]\n return p\n\n def get_trans_blk(index):\n return self.num_trans_blks[index-self.num_down-1]\n\n def get_cur_planes(index):\n return self.network_planes[index]\n\n def get_blk_num(index):\n return self.num_res_blks[index]\n\n cated_planes, fish = [in_planes] * self.depth, []\n for i in range(self.depth):\n # even num for down-sample, odd for up-sample\n is_down, has_trans, 
no_sampling = i not in range(self.num_down, self.num_down+self.num_up+1),\\\n i > self.num_down, i == self.num_down\n cur_planes, trans_planes, cur_blocks, num_trans =\\\n get_cur_planes(i), get_trans_planes(i), get_blk_num(i), get_trans_blk(i)\n\n stg_args = [is_down, cated_planes[i - 1], cur_planes, cur_blocks]\n\n if is_down or no_sampling:\n k, dilation = 1, 1\n else:\n k, dilation = cated_planes[i - 1] // cur_planes, 2 ** (i-self.num_down-1)\n\n sample_block = self._make_stage(*stg_args, has_trans=has_trans, trans_planes=trans_planes,\n has_score=(i==self.num_down), num_trans=num_trans, k=k, dilation=dilation,\n no_sampling=no_sampling)\n if i == self.depth - 1:\n sample_block.extend(self._make_score(cur_planes + trans_planes, has_pool=True))\n elif i == self.num_down:\n sample_block.append(nn.Sequential(self._make_se_block(cur_planes*2, cur_planes)))\n\n if i == self.num_down-1:\n cated_planes[i] = cur_planes * 2\n elif has_trans:\n cated_planes[i] = cur_planes + trans_planes\n else:\n cated_planes[i] = cur_planes\n fish.append(sample_block)\n return nn.ModuleList(fish)\n\n def _fish_forward(self, all_feat):\n def _concat(a, b):\n return torch.cat([a, b], dim=1)\n\n def stage_factory(*blks):\n def stage_forward(*inputs):\n if stg_id < self.num_down: # tail\n tail_blk = nn.Sequential(*blks[:2])\n return tail_blk(*inputs)\n elif stg_id == self.num_down:\n score_blks = nn.Sequential(*blks[:2])\n score_feat = score_blks(inputs[0])\n att_feat = blks[3](score_feat)\n return blks[2](score_feat) * att_feat + att_feat\n else: # refine\n feat_trunk = blks[2](blks[0](inputs[0]))\n feat_branch = blks[1](inputs[1])\n return _concat(feat_trunk, feat_branch)\n return stage_forward\n\n stg_id = 0\n # tail:\n while stg_id < self.depth:\n stg_blk = stage_factory(*self.fish[stg_id])\n if stg_id <= self.num_down:\n in_feat = [all_feat[stg_id]]\n else:\n trans_id = self.trans_map[stg_id-self.num_down-1]\n in_feat = [all_feat[stg_id], all_feat[trans_id]]\n\n all_feat[stg_id + 1] = stg_blk(*in_feat)\n stg_id += 1\n # loop exit\n if stg_id == self.depth:\n score_feat = self.fish[self.depth-1][-2](all_feat[-1])\n score = self.fish[self.depth-1][-1](score_feat)\n return score\n\n def forward(self, x):\n all_feat = [None] * (self.depth + 1)\n all_feat[0] = x\n return self._fish_forward(all_feat)\n\n\nclass FishNet(nn.Module):\n def __init__(self, block, **kwargs):\n super(FishNet, self).__init__()\n\n inplanes = kwargs['network_planes'][0]\n # resolution: 224x224\n self.conv1 = self._conv_bn_relu(3, inplanes // 2, stride=2)\n self.conv2 = self._conv_bn_relu(inplanes // 2, inplanes // 2)\n self.conv3 = self._conv_bn_relu(inplanes // 2, inplanes)\n self.pool1 = nn.MaxPool2d(3, padding=1, stride=2)\n # construct fish, resolution 56x56\n self.fish = Fish(block, **kwargs)\n self._init_weights()\n\n def _conv_bn_relu(self, in_ch, out_ch, stride=1):\n return nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True))\n\n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.pool1(x)\n score = self.fish(x)\n # 1*1 output\n out = score.view(x.size(0), -1)\n# print(out.shape)\n\n return out\n\n\ndef fish(**kwargs):\n return FishNet(Bottleneck, **kwargs)" ]
[ [ "torch.cat" ] ]
OneDirection9/LASER
[ "00daa5e32fa302e82118186f95a51743fdefa55e" ]
[ "laser/laser_lstm.py" ]
[ "from collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import utils\nfrom fairseq.models import (\n FairseqEncoder,\n FairseqIncrementalDecoder,\n FairseqEncoderDecoderModel,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.models.lstm import (\n Embedding,\n Linear,\n LSTM,\n LSTMCell,\n LSTMModel,\n)\n\n\n@register_model('laser')\nclass LaserModel(FairseqEncoderDecoderModel):\n \"\"\"Laser Encoder-Decoder implementation adapted from:\n https://github.com/pytorch/fairseq/blob/master/fairseq/models/lstm.py\n https://github.com/facebookresearch/LASER/blob/master/source/embed.py\n https://github.com/transducens/LASERtrain/blob/master/fairseq-modules/multilingual_lstm_laser.py\n \"\"\"\n\n def __init__(self, encoder, decoder):\n super().__init__(encoder, decoder)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n LSTMModel.add_args(parser)\n parser.add_argument('--lang-embedding-size', type=int, default=32,\n help='language embedding dimension')\n parser.add_argument('--encoder-model-path', type=str, default=None,\n help='path to pretrained model path')\n parser.add_argument('--fix-encoder', action='store_true')\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n from .laser_task import TranslationLaserTask\n assert isinstance(task, TranslationLaserTask)\n\n shared_dict = task.dicts[task.langs[0]]\n if any(task.dicts[lang] != shared_dict for lang in task.langs):\n raise ValueError('This model requires a joined dictionary.')\n\n # make sure that all args are properly defaulted (in case there are any new ones)\n base_architecture(args)\n\n # Languages index: lang codes into integers\n lang_dictionary = {\n task.langs[i]: i for i in range(len(task.langs))\n }\n\n if args.encoder_model_path is not None:\n print(f'* Loading parameters from {args.encoder_model_path}')\n state_dict = torch.load(args.encoder_model_path)\n params = state_dict['params']\n assert len(task.source_dictionary) == params.pop('num_embeddings')\n params['dictionary'] = task.source_dictionary\n encoder = LaserEncoder(**params)\n encoder.load_state_dict(state_dict['model'])\n else:\n encoder = LaserEncoder(\n dictionary=task.source_dictionary,\n embed_dim=args.encoder_embed_dim,\n hidden_size=args.encoder_hidden_size,\n num_layers=args.encoder_layers,\n bidirectional=args.encoder_bidirectional,\n )\n\n if args.fix_encoder:\n print('* Fixing encoder parameters')\n for p in encoder.parameters():\n p.requires_grad = False\n\n decoder = LaserDecoder(\n dictionary=task.target_dictionary,\n lang_dictionary=lang_dictionary,\n embed_dim=args.decoder_embed_dim,\n hidden_size=args.decoder_hidden_size,\n out_embed_dim=args.decoder_out_embed_dim,\n num_layers=args.decoder_layers,\n dropout_in=args.decoder_dropout_in,\n dropout_out=args.decoder_dropout_out,\n attention=False,\n encoder_output_units=int(args.encoder_hidden_size) * 2,\n lang_embedding_size=args.lang_embedding_size,\n )\n return cls(encoder, decoder)\n\n def forward(self, src_tokens, src_lengths, prev_output_tokens, decoder_lang, **kwargs):\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, decoder_lang=decoder_lang)\n decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)\n return decoder_out\n\n\nclass LaserEncoder(FairseqEncoder):\n def __init__(\n self, dictionary, padding_idx, embed_dim=320, hidden_size=512, num_layers=1,\n bidirectional=False, 
left_pad=True, padding_value=0.\n ):\n super().__init__(dictionary)\n\n num_embeddings = len(dictionary)\n\n self.num_layers = num_layers\n self.bidirectional = bidirectional\n self.hidden_size = hidden_size\n\n self.padding_idx = padding_idx\n self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx=self.padding_idx)\n\n self.lstm = nn.LSTM(\n input_size=embed_dim,\n hidden_size=hidden_size,\n num_layers=num_layers,\n bidirectional=bidirectional,\n )\n self.left_pad = left_pad\n self.padding_value = padding_value\n\n self.output_units = hidden_size\n if bidirectional:\n self.output_units *= 2\n\n def forward(self, src_tokens, src_lengths, decoder_lang):\n if self.left_pad:\n # convert left-padding to right-padding\n src_tokens = utils.convert_padding_direction(\n src_tokens,\n self.padding_idx,\n left_to_right=True,\n )\n\n bsz, seqlen = src_tokens.size()\n\n # embed tokens\n x = self.embed_tokens(src_tokens)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # pack embedded source tokens into a PackedSequence\n packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())\n\n # apply LSTM\n if self.bidirectional:\n state_size = 2 * self.num_layers, bsz, self.hidden_size\n else:\n state_size = self.num_layers, bsz, self.hidden_size\n h0 = x.data.new(*state_size).zero_()\n c0 = x.data.new(*state_size).zero_()\n packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))\n\n # unpack outputs and apply dropout\n x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_value)\n assert list(x.size()) == [seqlen, bsz, self.output_units]\n\n if self.bidirectional:\n def combine_bidir(outs):\n return torch.cat([\n torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view(1, bsz, self.output_units)\n for i in range(self.num_layers)\n ], dim=0)\n\n final_hiddens = combine_bidir(final_hiddens)\n final_cells = combine_bidir(final_cells)\n\n encoder_padding_mask = src_tokens.eq(self.padding_idx).t()\n\n # Set padded outputs to -inf so they are not selected by max-pooling\n padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1)\n if padding_mask.any():\n x = x.float().masked_fill_(padding_mask, float('-inf')).type_as(x)\n\n # Build the sentence embedding by max-pooling over the encoder outputs\n sentemb = x.max(dim=0)[0]\n\n return {\n 'sentemb': sentemb,\n 'encoder_out': (x, final_hiddens, final_cells),\n 'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None,\n 'decoder_lang': decoder_lang,\n }\n\n def reorder_encoder_out(self, encoder_out, new_order):\n return {\n 'sentemb': encoder_out['sentemb'].index_select(0, new_order),\n 'encoder_out': (\n encoder_out['encoder_out'][0].index_select(1, new_order),\n encoder_out['encoder_out'][1].index_select(1, new_order),\n encoder_out['encoder_out'][2].index_select(1, new_order),\n ),\n 'encoder_padding_mask': encoder_out['encoder_padding_mask'].index_select(1, new_order),\n 'decoder_lang': encoder_out['decoder_lang'],\n }\n\n\nclass LaserDecoder(FairseqIncrementalDecoder):\n\n def __init__(\n self, dictionary, lang_dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512,\n num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True,\n encoder_output_units=512, pretrained_embed=None,\n share_input_output_embed=False, adaptive_softmax_cutoff=None,\n lang_embedding_size=32\n ):\n super().__init__(dictionary)\n self.dropout_in = dropout_in\n self.dropout_out = dropout_out\n self.hidden_size = hidden_size\n self.share_input_output_embed = 
share_input_output_embed\n self.lang_embedding_size = lang_embedding_size\n self.lang_dictionary = lang_dictionary\n self.embed_langs = nn.Embedding(len(lang_dictionary), lang_embedding_size)\n self.need_attn = False\n\n self.adaptive_softmax = None\n num_embeddings = len(dictionary)\n padding_idx = dictionary.pad()\n self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)\n\n self.encoder_output_units = encoder_output_units\n self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size)\n self.encoder_cell_proj = Linear(encoder_output_units, hidden_size)\n\n input_size = hidden_size + embed_dim + lang_embedding_size + encoder_output_units\n self.layers = nn.ModuleList([\n LSTMCell(\n input_size=input_size if layer == 0 else hidden_size,\n hidden_size=hidden_size,\n )\n for layer in range(num_layers)\n ])\n\n self.attention = None\n if hidden_size != out_embed_dim:\n self.additional_fc = Linear(hidden_size, out_embed_dim)\n if not self.share_input_output_embed:\n self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)\n\n def forward(self, prev_output_tokens, encoder_out, incremental_state=None):\n x, attn_scores = self.extract_features(\n prev_output_tokens, encoder_out, incremental_state\n )\n return self.output_layer(x), attn_scores\n\n def extract_features(self, prev_output_tokens, encoder_out, incremental_state=None):\n \"\"\"\n Similar to *forward* but only return features.\n \"\"\"\n encoder_sentemb = encoder_out['sentemb']\n encoder_padding_mask = encoder_out['encoder_padding_mask']\n lang = encoder_out['decoder_lang']\n encoder_out = encoder_out['encoder_out']\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n bsz, seqlen = prev_output_tokens.size()\n\n # get outputs from encoder\n encoder_outs, encoder_hiddens, encoder_cells = encoder_out[:3]\n srclen = encoder_outs.size(0)\n\n # embed tokens\n x = self.embed_tokens(prev_output_tokens)\n x = F.dropout(x, p=self.dropout_in, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # embed language\n lang_tensor = torch.LongTensor(\n [self.lang_dictionary[lang]] * bsz\n ).to(device=prev_output_tokens.device)\n l = self.embed_langs(lang_tensor)\n\n # initialize previous states (or get from cache during incremental generation)\n cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')\n if cached_state is not None:\n prev_hiddens, prev_cells, input_feed = cached_state\n else:\n num_layers = len(self.layers)\n prev_hiddens = [encoder_sentemb for i in range(num_layers)]\n prev_cells = [encoder_sentemb for i in range(num_layers)]\n prev_hiddens = [self.encoder_hidden_proj(x) for x in prev_hiddens]\n prev_cells = [self.encoder_cell_proj(x) for x in prev_cells]\n input_feed = x.new_zeros(bsz, self.hidden_size)\n\n outs = []\n for j in range(seqlen):\n # input feeding: concatenate context vector from previous time step\n input = torch.cat((x[j, :, :], encoder_sentemb, input_feed, l), dim=1)\n\n for i, rnn in enumerate(self.layers):\n # recurrent cell\n hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))\n\n # hidden state becomes the input to the next layer\n input = F.dropout(hidden, p=self.dropout_out, training=self.training)\n\n # save state for next time step\n prev_hiddens[i] = hidden\n prev_cells[i] = cell\n\n out = hidden\n out = F.dropout(out, p=self.dropout_out, training=self.training)\n\n # input feeding\n input_feed = out\n\n # save final output\n outs.append(out)\n\n # cache previous states 
(no-op except during incremental generation)\n utils.set_incremental_state(\n self, incremental_state, 'cached_state',\n (prev_hiddens, prev_cells, input_feed),\n )\n\n # collect outputs across time steps\n x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)\n\n # T x B x C -> B x T x C\n x = x.transpose(1, 0)\n\n if hasattr(self, 'additional_fc') and self.adaptive_softmax is None:\n x = self.additional_fc(x)\n x = F.dropout(x, p=self.dropout_out, training=self.training)\n\n return x, None\n\n def output_layer(self, x):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n if self.share_input_output_embed:\n x = F.linear(x, self.embed_tokens.weight)\n else:\n x = self.fc_out(x)\n return x\n\n def reorder_incremental_state(self, incremental_state, new_order):\n super().reorder_incremental_state(incremental_state, new_order)\n cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')\n if cached_state is None:\n return\n\n def reorder_state(state):\n if isinstance(state, list):\n return [reorder_state(state_i) for state_i in state]\n return state.index_select(0, new_order)\n\n new_state = tuple(map(reorder_state, cached_state))\n utils.set_incremental_state(self, incremental_state, 'cached_state', new_state)\n\n\n@register_model_architecture('laser', 'laser')\ndef base_architecture(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)\n args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)\n args.encoder_freeze_embed = getattr(args, 'encoder_freeze_embed', False)\n args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', 512)\n args.encoder_layers = getattr(args, 'encoder_layers', 1)\n args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', True)\n args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout)\n args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout)\n args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)\n args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)\n args.decoder_freeze_embed = getattr(args, 'decoder_freeze_embed', False)\n args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', 2048)\n args.decoder_layers = getattr(args, 'decoder_layers', 1)\n args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 2048)\n args.decoder_attention = getattr(args, 'decoder_attention', False)\n args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout)\n args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout)\n args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)\n args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)\n args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)\n" ]
[ [ "torch.LongTensor", "torch.load", "torch.nn.functional.dropout", "torch.nn.LSTM", "torch.cat", "torch.nn.Embedding", "torch.nn.utils.rnn.pad_packed_sequence", "torch.nn.functional.linear" ] ]
dkoes/tensorflow
[ "89c0134fd811a12d8100da385cb9a218247f0933", "b3492ab89abbb7892ad7d4bfc82ee1d7ddafca7e" ]
[ "tensorflow/contrib/tpu/python/tpu/keras_support.py", "tensorflow/contrib/tpu/python/tpu/async_checkpoint.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"*Experimental* support for running Keras models on the TPU.\n\nTo use, wrap your model with the `keras_support.tpu_model` function.\n\nExample usage:\n\n```\nimage = tf.keras.layers.Input(shape=(28, 28, 3), name='image')\nc1 = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3))( image)\nflattened = tf.keras.layers.Flatten()(c1)\nlogits = tf.keras.layers.Dense(10, activation='softmax')(flattened)\nmodel = tf.keras.Model(inputs=[image], outputs=[logits])\n\nresolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_name)\nstrategy = keras_support.TPUDistributionStrategy(resolver)\nmodel = keras_support.tpu_model(model, strategy=strategy)\n\n# Only TF optimizers are currently supported.\nmodel.compile(optimizer=tf.train.AdamOptimizer(), ...)\n\n# `images` and `labels` should be Numpy arrays. Support for tensor input\n# (e.g. datasets) is planned.\nmodel.fit(images, labels)\n```\n\"\"\"\n\n# pylint: disable=protected-access\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\nimport contextlib\nimport re\nimport sys\nimport time\n\nimport numpy as np\nimport six\n\nfrom tensorflow.contrib.cluster_resolver.python.training import tpu_cluster_resolver as tpu_cluster_resolver_lib\nfrom tensorflow.contrib.framework.python.framework import experimental\nfrom tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result\nfrom tensorflow.contrib.tpu.python.ops import tpu_ops\nfrom tensorflow.contrib.tpu.python.tpu import keras_tpu_variables\nfrom tensorflow.contrib.tpu.python.tpu import tpu\nfrom tensorflow.contrib.tpu.python.tpu import tpu_function\nfrom tensorflow.contrib.tpu.python.tpu import tpu_optimizer\nfrom tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import session as tf_session\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import callbacks as cbks\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import models\nfrom tensorflow.python.keras import optimizers as keras_optimizers\nfrom tensorflow.python.keras.engine import base_layer\nfrom 
tensorflow.python.keras.engine import training_arrays\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.layers import embeddings\nfrom tensorflow.python.keras.utils.generic_utils import make_batches\nfrom tensorflow.python.keras.utils.generic_utils import slice_arrays\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_linalg_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\n\n\n# TODO(b/114775106): temporary shim to optionally initialize the TPU\n# This increases the odds our session is initialized, but shouldn't be needed.\n_TEST_REWRITE_OP = None\n\n\ndef _maybe_initialize_tpu(session):\n  \"\"\"Initializes the TPU if it has not already been initialized.\"\"\"\n  global _TEST_REWRITE_OP\n  try:\n    # Try to use a cached version to avoid another round of graph optimization.\n    test_rewrite_op = _TEST_REWRITE_OP\n    if (test_rewrite_op is None or\n        test_rewrite_op[0].graph != ops.get_default_graph()):\n\n      def test_op():\n        return constant_op.constant(1) + constant_op.constant(1)\n\n      test_rewrite_op = tpu.rewrite(test_op)\n      _TEST_REWRITE_OP = test_rewrite_op\n\n    session.run(test_rewrite_op)\n  except errors.FailedPreconditionError as _:\n    session.run(tpu.initialize_system())\n\n\[email protected]\ndef _tpu_session_context():\n  \"\"\"Initializes the TPU and clears cache entries for bad sessions.\"\"\"\n  try:\n    _maybe_initialize_tpu(K.get_session())\n    yield\n  except (errors.FailedPreconditionError, errors.AbortedError) as e:\n    K.clear_session()\n    raise Exception(\"\"\"\nAn error occurred connecting or initializing your TPU.\n\nThe session has been reset. Re-run keras_to_tpu_model to create a new session.\n\"\"\" + str(e))\n\n\ndef setup_tpu_session(cluster_resolver):\n  \"\"\"Construct or return a `tf.Session` connected to the given cluster.\"\"\"\n  master = cluster_resolver.master()\n\n  # Use the existing session if we're already connected to this TPU.\n  # N.B. K.get_session() is a non-trivial operation, and may fail if the remote\n  # session has been reset.\n  try:\n    default_session = K.get_session()\n    if (default_session._target == master and\n        getattr(default_session, '_tpu_initialized', None)):\n      return\n  except errors.AbortedError as _:\n    # We lost the remote session and need to re-initialize.\n    logging.warning('Lost remote session: creating a new session.')\n\n  cluster_spec = cluster_resolver.cluster_spec()\n  config = config_pb2.ConfigProto(isolate_session_state=True)\n  if cluster_spec:\n    config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())\n\n  tpu_session = tf_session.Session(target=master, config=config)\n  tpu_session.run(tpu.initialize_system())\n  tpu_session._tpu_initialized = True\n\n  # N.B. We have to call `K.set_session()` AND set our session as the\n  # TF default. 
`K.get_session()` surprisingly does not return the value\n  # supplied by K.set_session otherwise.\n  K.set_session(tpu_session)\n\n\ntry:\n  from scipy.sparse import issparse  # pylint: disable=g-import-not-at-top\nexcept ImportError:\n  issparse = None\n\n\ndef get_tpu_system_metadata(tpu_cluster_resolver):\n  \"\"\"Retrieves TPU system metadata given a TPUClusterResolver.\"\"\"\n  master = tpu_cluster_resolver.master()\n\n  # pylint: disable=protected-access\n  cluster_spec = tpu_cluster_resolver.cluster_spec()\n  cluster_def = cluster_spec.as_cluster_def() if cluster_spec else None\n  tpu_system_metadata = (\n      tpu_system_metadata_lib._query_tpu_system_metadata(\n          master, cluster_def=cluster_def, query_topology=False))\n\n  return tpu_system_metadata\n\n\nclass TPUDistributionStrategy(object):\n  \"\"\"The strategy to run a Keras model on TPU.\"\"\"\n\n  def __init__(self, tpu_cluster_resolver=None, using_single_core=False):\n    \"\"\"Construct a TPUDistributionStrategy.\n\n    Args:\n      tpu_cluster_resolver: Any instance of `TPUClusterResolver`. If None, will\n        create one with '' as master address.\n      using_single_core: Bool. This is a debugging option, which might be\n        removed in future once the model replication functionality is mature\n        enough. If `False` (default behavior), the system automatically finds\n        the best configuration, in terms of number of TPU cores, for the model\n        replication, typically using all available TPU cores. If set to\n        `True`, force the model replication to use a single core, i.e., no\n        replication.\n    Raises:\n      Exception: No TPU found on the given worker.\n    \"\"\"\n\n    if tpu_cluster_resolver is None:\n      tpu_cluster_resolver = tpu_cluster_resolver_lib.TPUClusterResolver('')\n\n    metadata = get_tpu_system_metadata(tpu_cluster_resolver)\n    self._tpu_metadata = metadata\n    self._tpu_cluster_resolver = tpu_cluster_resolver\n    self._num_cores = 1 if using_single_core else metadata.num_cores\n\n    # Walk device list to identify TPU worker for enqueue/dequeue operations.\n    worker_re = re.compile('/job:([^/]+)')\n    for device in metadata.devices:\n      if 'TPU:0' in device.name:\n        self._worker_name = worker_re.search(device.name).group(1)\n        return\n    raise Exception('No TPU found on given worker.')\n\n  def _make_assignment_for_model(self, cpu_model):\n    \"\"\"Makes a `TPUAssignment` for the passed in `cpu_model`.\"\"\"\n    num_cores = self._num_cores\n    if num_cores > 1 and cpu_model.stateful:\n      logging.warning(\n          'Model replication does not currently support stateful models. '\n          'Degrading to a single core.')\n      num_cores = 1\n\n    return TPUAssignment(worker_name=self._worker_name, num_cores=num_cores)\n\n\nclass TPUAssignment(object):\n  \"\"\"This object holds the TPU resources assignment for the concrete model.\n\n  `TPUDistributionStrategy` is responsible for creating the instance of\n  `TPUAssignment`, so it can dynamically adjust the `num_cores` to use based on\n  the model and input batch sizes.\n  \"\"\"\n\n  def __init__(self, worker_name, num_cores):\n    self._worker_name = worker_name\n    self._num_cores = num_cores\n\n  @property\n  def worker_name(self):\n    return self._worker_name\n\n  @property\n  def num_towers(self):\n    # TODO(xiejw): Support automatically assigning num_cores based on inputs.\n    return self._num_cores\n\n\nclass TPUEmbedding(embeddings.Embedding):\n  \"\"\"TPU compatible embedding layer.\n\n  The default Keras layer is not TPU compatible. 
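(The stock layer\n  implements its lookup with a gather op, which the TPU compiler does not\n  support; the `call` method below instead expresses the lookup as a one-hot\n  matrix product.) 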
This layer is a drop-in\n  replacement: it has the same behavior and will work on CPU and GPU devices.\n  \"\"\"\n\n  def build(self, input_shape):\n    if input_shape[0] is None:\n      raise ValueError(\n          'TPUEmbeddings must have a fixed input_length or input shape.')\n    return super(TPUEmbedding, self).build(input_shape)\n\n  def call(self, inputs):\n    if K.dtype(inputs) != 'int32':\n      inputs = math_ops.cast(inputs, 'int32')\n\n    inputs = array_ops.one_hot(inputs, self.input_dim)\n    return math_ops.tensordot(inputs, self.embeddings, 1)\n\n\ndef _cross_replica_concat(tensor, core_id, num_cores, name):\n  \"\"\"Concatenate `tensor` across cores.\n\n  Args:\n    tensor: The tensor to be concatenated. Must be bfloat16, float32 or int32.\n    core_id: Tensor indicating the current TPU core.\n    num_cores: Python int. The total number of TPU cores in the system.\n    name: The string name to print for debugging.\n\n  Returns:\n    The same concatenated Tensor on each core.\n  \"\"\"\n\n  input_dtype = tensor.dtype\n  if input_dtype not in [dtypes.bfloat16, dtypes.float32, dtypes.int32]:\n    raise TypeError('For model replication, only bfloat16, float32 and int32 '\n                    'are supported for model outputs and targets. Got {} for '\n                    '{}.'.format(input_dtype, name))\n\n  batch_size = tensor.shape[0]\n  mask = math_ops.to_float(\n      math_ops.equal(np.arange(num_cores, dtype=np.int32), core_id))\n  mask = array_ops.reshape(mask, [num_cores] + [1] * tensor.shape.ndims)\n  result = mask * math_ops.to_float(tensor)\n  local_tensor_with_holes = array_ops.reshape(result,\n                                              [-1] + result.shape.as_list()[2:])\n  concat_tensor = tpu_ops.cross_replica_sum(local_tensor_with_holes)\n  concat_tensor.set_shape((num_cores * batch_size,) + tuple(tensor.shape[1:]))\n\n  if concat_tensor.dtype != input_dtype:\n    concat_tensor = math_ops.cast(concat_tensor, input_dtype)\n  return concat_tensor\n\n\nclass KerasCrossShardOptimizer(keras_optimizers.Optimizer):\n  \"\"\"An optimizer that averages gradients across TPU shards.\"\"\"\n\n  def __init__(self, opt, name='KerasCrossShardOptimizer'):\n    \"\"\"Construct a new cross-shard optimizer.\n\n    Args:\n      opt: An existing `Optimizer` to encapsulate.\n      name: Optional name prefix for the operations created when applying\n        gradients. 
Defaults to \"KerasCrossShardOptimizer\".\n\n Raises:\n ValueError: If reduction is not a valid cross-shard reduction.\n \"\"\"\n super(KerasCrossShardOptimizer, self).__init__()\n self._name = name\n self._opt = opt\n logging.info('KerasCrossShard: %s %s', self._opt, self._opt.weights)\n\n def get_updates(self, loss, params):\n self._opt.get_gradients = self.get_gradients\n return self._opt.get_updates(loss, params)\n\n def get_gradients(self, loss, params):\n num_shards = tpu_function.get_tpu_context().number_of_shards\n grads = super(KerasCrossShardOptimizer, self).get_gradients(loss, params)\n return [tpu_ops.cross_replica_sum(grad) / num_shards for grad in grads]\n\n def get_weights(self):\n return self._opt.get_weights()\n\n def get_config(self):\n return self._opt.get_config()\n\n # Defer remaining operations to the underlying optimizer\n def __getattr__(self, key):\n return getattr(self._opt, key)\n\n\nclass TPUModelOp(\n collections.namedtuple('TPUModelOp', [\n 'compile_op', 'execute_op', 'infeed_tensors', 'infeed_op', 'outfeed_op'\n ])):\n pass\n\n\ndef _valid_name(tensor_name):\n \"\"\"Return a valid tensor name (strips '/', ':', etc).\"\"\"\n return re.sub('[^a-zA-Z0-9_-]+', '', tensor_name)\n\n\ndef _replicated_optimizer(opt):\n \"\"\"Wrap the optimizer `opt` with CrossShardOptimizer if applicable.\"\"\"\n # Always wrap `opt` with CrossShardOptimizer, even if we are running on a\n # single core. This ensures Keras properly tracks and initializes optimizer\n # variables.\n if isinstance(opt, keras_optimizers.TFOptimizer):\n return tpu_optimizer.CrossShardOptimizer(opt.optimizer)\n else:\n return KerasCrossShardOptimizer(opt)\n\n\ndef _clone_optimizer(optimizer, config=None, worker_name=None):\n \"\"\"Returns a cloned optimizer with the provided optimizer.config or config.\"\"\"\n if not isinstance(optimizer, keras_optimizers.Optimizer):\n # In the first call to tpu_model(model), Keras may not have wrapped the TF\n # optimizer in the TFOptimizer helper, e.g., the given model isn't compiled\n # or optimizer isn't set, and later generated tpu_model compiles with a TF\n # optimizer.\n return optimizer\n\n if isinstance(optimizer, keras_optimizers.TFOptimizer):\n return keras_optimizers.TFOptimizer(optimizer.optimizer)\n\n if config is None:\n config = optimizer.get_config()\n logging.info('Cloning %s %s', optimizer.__class__.__name__, config)\n with ops.device(\n '%s/device:CPU:0' % ('/job:%s' % worker_name if worker_name else '')):\n # Explicitly put optimizer parameter variables on TPU worker.\n return optimizer.__class__.from_config(config)\n\n\nclass TPURewriteContext(object):\n \"\"\"Prepare the environment for a Keras model during `tpu.rewrite`.\n\n This overrides the default placeholder behaviour to instead refer to a preset\n input mapping. Placeholders are unsupported in TPU compiled code, and must\n be replaced with explicit inputs or values from the infeed queue.\n\n Instead of explicitly threading inputs all the way through the Keras codebase,\n we override the behavior of the placeholder while compiling and inject the\n Tensors from the infeed in place of the placeholder.\n\n Similarly, as we compile a new sub-graph for each unique shape and execution\n mode, we need to override the behavior of an embedded `name_scope` call in\n the base Keras layer code. 
This allows us to re-use the same weights across\n many compiles and share a single session/graph.\n \"\"\"\n\n def __init__(self, input_map):\n self._input_map = input_map\n self._default_placeholder = None\n self._default_name_scope = None\n\n def __enter__(self):\n\n def _placeholder(dtype, shape=None, name=None): # pylint: disable=unused-argument\n logging.info('Remapping placeholder for %s', name)\n if name in self._input_map:\n return self._input_map[name]\n else:\n logging.info('Default: %s', name)\n return self._default_placeholder(dtype, shape, name)\n\n def _name_scope(name, default_name=None, values=None):\n caller_frame = sys._getframe().f_back\n caller_obj = caller_frame.f_locals.get('self')\n if (caller_obj is not None and\n isinstance(caller_obj, base_layer.Layer) and name is not None):\n return variable_scope.variable_scope(\n name, default_name, values, reuse=variable_scope.AUTO_REUSE)\n\n return self._default_name_scope(name, default_name, values)\n\n self._default_placeholder = array_ops.placeholder\n self._default_name_scope = ops.name_scope\n self._default_make_variable = base_layer.make_variable\n self._default_random_normal = random_ops.random_normal\n self._default_qr = gen_linalg_ops.qr\n\n array_ops.placeholder = _placeholder\n\n # Replace random_ops.random_normal with a dummy function because\n # `random_normal` isn't yet implemented on the TPU. Because these\n # initialized values are overwritten by the CPU values, this is okay.\n def random_normal(shape,\n mean=0.0,\n stddev=1.0,\n dtype=dtypes.float32,\n seed=None,\n name=None):\n del mean\n del stddev\n del seed\n return array_ops.zeros(shape, dtype=dtype, name=name)\n\n random_ops.random_normal = random_normal\n\n # Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.\n # TODO(saeta): Remove qr override once we confirm the qr implementation is\n # ok.\n # pylint: disable=redefined-builtin\n def qr(input, full_matrices=False, name=None):\n \"\"\"Dummy implementation of qr decomposition.\"\"\"\n del full_matrices # TODO(saeta): Properly handle the full matrix case.\n input_shape = input.shape\n if len(input_shape) < 2:\n raise ValueError('Invalid shape passed to qr: %s' % input_shape)\n p = min(input_shape[-1], input_shape[-2])\n if len(input_shape) == 2:\n q = array_ops.zeros((p, p), name=name)\n r = array_ops.zeros(input_shape, name=name)\n return (r, q)\n elif len(input_shape) == 3:\n n = input_shape[0]\n q = array_ops.zeros((n, p, p), name=name)\n r = array_ops.zeros(input_shape, name=name)\n return (r, q)\n else:\n raise ValueError('Invalid shape passed to qr: %s' % input_shape)\n\n gen_linalg_ops.qr = qr\n\n ops.name_scope = _name_scope\n base_layer.make_variable = variable_scope.get_variable\n logging.info('Overriding default placeholder.')\n return\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n array_ops.placeholder = self._default_placeholder\n ops.name_scope = self._default_name_scope\n base_layer.make_variable = self._default_make_variable\n random_ops.random_normal = self._default_random_normal\n gen_linalg_ops.qr = self._default_qr\n\n\nclass SizedInfeed(\n collections.namedtuple('SizedInfeed',\n ['sharded_infeed_tensors', 'infeed_ops'])):\n \"\"\"Represents an instantiation of the infeed ops for a concrete input shape.\n\n sharded_infeed_tensors: A data structure of Tensors used to represent the\n placeholder tensors that must be fed when using feed_dicts.\n\n infeed_ops: the set of ops that will be run to drive infeed for a single step.\n \"\"\"\n pass\n\n\nclass 
TPUInfeedInstance(object):\n  \"\"\"TPUInfeedInstance represents the logic to manage feeding in a single step.\n\n  See the comments on the `TPUInfeedManager` for a description of how infeed\n  is managed.\n  \"\"\"\n\n  @abc.abstractmethod\n  def make_input_specs(self, input_tensors):\n    \"\"\"Constructs the infeed_specs for the given Infeed instance.\n\n    Args:\n      input_tensors: The inputs to the model.\n\n    Returns:\n      A list of `tensor_spec.TensorSpec` objects describing the model inputs.\n    \"\"\"\n    pass\n\n  def make_feed_dict(self, tpu_model_op):\n    \"\"\"Constructs a feed_dict for this instance, given the tpu_model_op.\n\n    Args:\n      tpu_model_op: A `TPUModelOp` representing the TPU Model for this\n        instance's input spec.\n\n    Returns:\n      A dictionary to use as the feed_dict of a `session.run` call.\n    \"\"\"\n    pass\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass TPUInfeedManager(object):\n  \"\"\"TPUInfeedManager manages the infeeding of data to a TPU computation.\n\n  Because there are multiple data sources (e.g. in-memory NumPy arrays,\n  `tf.data.Dataset`s), we abstract the different logic behind a single\n  interface: the `TPUInfeedManager`.\n\n  (1) A `TPUFunction` is called with a set of inputs. Based on the inputs,\n  `TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a\n  new one if required).\n\n  (2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`\n  which returns a `TPUInfeedInstance`.\n\n  (3) The `TPUFunction` checks in the shape cache for a pre-compiled instance of\n  the model based on the returned `input_specs` from `TPUInfeedInstance`.\n\n  (4) [Optional.] If the model has not already been instantiated for the given\n  input spec, the `TPUFunction` compiles the model for the input spec (using the\n  `TPUInfeedManager`).\n\n  (5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the\n  compiled model instance corresponding to its shape.\n  \"\"\"\n\n  @abc.abstractmethod\n  def make_infeed_instance(self, inputs):\n    \"\"\"Given a single step's input, construct a `TPUInfeedInstance`.\n\n    Args:\n      inputs: The inputs to a given step.\n\n    Returns:\n      A subclass of `TPUInfeedInstance`.\n    \"\"\"\n    pass\n\n  @abc.abstractmethod\n  def build_infeed_from_input_specs(self, input_specs, execution_mode):\n    \"\"\"For a given input specification (size, type), construct the infeed ops.\n\n    This is called only once for a given input specification and builds the\n    graph ops. It does not have a pointer to the actual infeed data.\n\n    Args:\n      input_specs: TODO(saeta): Document me!\n      execution_mode: TODO(saeta): Document me!\n\n    Returns:\n      A `SizedInfeed` instance.\n    \"\"\"\n    pass\n\n\nclass TPUNumpyInfeedManager(TPUInfeedManager):\n  \"\"\"TPU Infeed manager for Numpy inputs.\"\"\"\n\n  class NumpyInfeedInstance(TPUInfeedInstance):\n    \"\"\"Infeed instance for Numpy inputs.\"\"\"\n\n    def __init__(self, sharded_inputs):\n      self._sharded_inputs = sharded_inputs\n\n    def make_input_specs(self, input_tensors):\n      # Compute an input specification (used to generate infeed enqueue and\n      # dequeue operations). We use the shape from our input array and the\n      # dtype from our model. 
A user may pass in a float64 for a float32\n # input: for model compatibility we still must generate a float32 infeed.\n input_specs = []\n # We use the shape and dtype from the first shard to compute the input\n # metadata (`input_specs`); all replicas have the same type and shape.\n for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):\n input_specs.append(\n tensor_spec.TensorSpec(ary.shape, tensor.dtype,\n _valid_name(tensor.name)))\n\n return input_specs\n\n def make_feed_dict(self, tpu_model_op):\n infeed_dict = {}\n for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,\n self._sharded_inputs):\n for tensor, value in zip(infeed_tensors, inputs):\n infeed_dict[tensor] = value\n return infeed_dict\n\n def __init__(self, tpu_assignment):\n self._tpu_assignment = tpu_assignment\n\n def _split_tensors(self, inputs):\n \"\"\"Split input data across shards.\n\n Each input is sliced along the batch axis.\n\n Args:\n inputs: List of Numpy arrays to run on the TPU.\n\n Returns:\n List of lists containing the input to feed to each TPU shard.\n \"\"\"\n if self._tpu_assignment.num_towers == 1:\n return [inputs]\n\n batch_size = inputs[0].shape[0]\n assert batch_size % self._tpu_assignment.num_towers == 0, (\n 'batch_size must be divisible by the number of TPU cores in use (%s '\n 'vs %s)' % (batch_size, self._tpu_assignment.num_towers))\n shard_size = batch_size // self._tpu_assignment.num_towers\n input_list = []\n for index in range(self._tpu_assignment.num_towers):\n shard_inputs = [\n x[index * shard_size:(index + 1) * shard_size] for x in inputs\n ]\n input_list.append(shard_inputs)\n return input_list\n\n def make_infeed_instance(self, inputs):\n sharded_inputs = self._split_tensors(inputs)\n return self.NumpyInfeedInstance(sharded_inputs)\n\n def build_infeed_from_input_specs(self, input_specs, execution_mode):\n infeed_op = []\n shard_infeed_tensors = []\n\n for shard_id in range(self._tpu_assignment.num_towers):\n with ops.device(\n '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):\n infeed_tensors = []\n with ops.device('/device:TPU:%d' % shard_id):\n for spec in input_specs:\n # Construct placeholders for each of the inputs.\n infeed_tensors.append(\n array_ops.placeholder(\n dtype=spec.dtype,\n shape=spec.shape,\n name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))\n shard_infeed_tensors.append(infeed_tensors)\n\n infeed_op.append(\n tpu_ops.infeed_enqueue_tuple(\n infeed_tensors, [spec.shape for spec in input_specs],\n name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),\n device_ordinal=shard_id))\n return SizedInfeed(\n infeed_ops=infeed_op, sharded_infeed_tensors=shard_infeed_tensors)\n\n\nclass TPUDatasetInfeedManager(TPUInfeedManager):\n \"\"\"Manages infeed for a `tf.data.Dataset` into a TPU computation.\n\n \"\"\"\n\n class DatasetInfeedInstance(TPUInfeedInstance):\n \"\"\"An instance of the TPU infeed.\"\"\"\n\n def __init__(self, input_specs):\n self._input_specs = input_specs\n\n def make_input_specs(self, input_tensors):\n # TODO(saeta): Do error checking here!\n return self._input_specs\n\n def make_feed_dict(self, tpu_model_op):\n # TODO(saeta): Verify tpu_model_op is as expected!\n return {}\n\n # pylint: disable=redefined-outer-name\n def __init__(self, dataset, tpu_assignment, mode):\n \"\"\"Constructs a TPUDatasetInfeedManager.\n\n Args:\n dataset: A `tf.data.Dataset` to infeed.\n tpu_assignment: The `TPUAssignment` used to configure the\n Keras TPU model.\n mode: ModeKeys enum.\n \"\"\"\n self._verify_dataset_shape(dataset)\n\n 
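# Illustrative sketch: a dataset typically reaches this manager when\n    # `fit`/`evaluate` are given a zero-argument callable that returns a\n    # batched, statically shaped `tf.data.Dataset`. The names `images`,\n    # `labels` and `make_dataset` here are hypothetical:\n    #\n    #   def make_dataset():\n    #     ds = tf.data.Dataset.from_tensor_slices((images, labels))\n    #     return ds.batch(128, drop_remainder=True)\n    #\n    #   tpu_model.fit(make_dataset, epochs=2, steps_per_epoch=100)\n    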
self._dataset = dataset\n self._tpu_assignment = tpu_assignment\n dummy_x_shape = dataset.output_shapes[0].as_list()\n dummy_x_shape[0] *= tpu_assignment.num_towers\n dummy_y_shape = dataset.output_shapes[1].as_list()\n dummy_y_shape[0] *= tpu_assignment.num_towers\n self._iterator = dataset.make_initializable_iterator()\n K.get_session().run(self._iterator.initializer)\n\n self._get_next_ops = []\n ctrl_deps = []\n for i in range(tpu_assignment.num_towers):\n with ops.control_dependencies(ctrl_deps): # Ensure deterministic\n # TODO(saeta): Ensure correct placement!\n get_next_op = self._iterator.get_next()\n self._get_next_ops.append(get_next_op)\n ctrl_deps.extend(get_next_op)\n\n # Use dummy numpy inputs for the rest of Keras' shape checking. We\n # intercept them when building the model.\n self._dummy_x = np.zeros(\n dummy_x_shape, dtype=dataset.output_types[0].as_numpy_dtype)\n self._dummy_y = np.zeros(\n dummy_y_shape, dtype=dataset.output_types[1].as_numpy_dtype)\n\n input_specs = []\n if isinstance(self._iterator.output_shapes, tuple):\n assert isinstance(self._iterator.output_types, tuple)\n assert len(self._iterator.output_shapes) == len(\n self._iterator.output_types)\n for i in range(len(self._iterator.output_shapes)):\n spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],\n self._iterator.output_types[i])\n input_specs.append(spec)\n elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):\n spec = tensor_spec.TensorSpec(self._iterator.output_shapes,\n self._iterator.output_types)\n input_specs.append(spec)\n\n # Pre-process the inputs and get_next_ops before caching.\n input_specs, self._get_next_ops = (\n _inject_tpu_inputs_for_dataset(\n tpu_assignment, mode, input_specs, self._get_next_ops))\n self._infeed_instance = self.DatasetInfeedInstance(input_specs)\n\n def _verify_dataset_shape(self, dataset):\n \"\"\"Verifies a dataset is of an appropriate shape for TPUs.\"\"\"\n if not isinstance(dataset, dataset_ops.Dataset):\n raise ValueError('The function passed as the `x` parameter did not '\n 'return a `tf.data.Dataset`.')\n if not isinstance(dataset.output_classes, tuple):\n raise ValueError('The dataset must return a tuple of tf.Tensors, '\n 'instead it returns: %s' % dataset.output_classes)\n if len(dataset.output_classes) != 2:\n raise ValueError('The dataset must return a 2-element tuple, got '\n '%s output classes instead.' % (dataset.output_classes,))\n for i, cls in enumerate(dataset.output_classes):\n if cls != ops.Tensor:\n raise ValueError('The dataset returned a non-Tensor type (%s) at '\n 'index %d.' % (cls, i))\n for i, shape in enumerate(dataset.output_shapes):\n if not shape:\n raise ValueError('The dataset returns a scalar tensor in '\n 'tuple index %d. Did you forget to batch? '\n '(Output shapes: %s).' % (i, dataset.output_shapes))\n for j, dim in enumerate(shape):\n if dim.value is None:\n if j == 0:\n hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '\n 'drop_remainder=True)`?')\n else:\n hint = ''\n raise ValueError(\n 'The Keras-TPU integration for `tf.data` '\n 'currently requires static shapes. The provided '\n 'dataset only has a partially defined shape. 
'\n              '(Dimension %d of output tensor %d is not statically known '\n              'for output shapes: %s.%s)' % (j, i, dataset.output_shapes, hint))\n\n  @property\n  def dummy_x(self):\n    return self._dummy_x\n\n  @property\n  def dummy_y(self):\n    return self._dummy_y\n\n  def make_infeed_instance(self, inputs):\n    # TODO(saeta): Verify inputs is as expected.\n    return self._infeed_instance\n\n  def build_infeed_from_input_specs(self, input_specs, execution_mode):\n    shard_infeed_tensors = self._get_next_ops\n    assert len(shard_infeed_tensors) == self._tpu_assignment.num_towers\n    infeed_ops = []\n    for shard_id in range(self._tpu_assignment.num_towers):\n      with ops.device(\n          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):\n        infeed_ops.append(\n            tpu_ops.infeed_enqueue_tuple(\n                shard_infeed_tensors[shard_id],\n                [spec.shape for spec in input_specs],\n                name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),\n                device_ordinal=shard_id))\n    return SizedInfeed(\n        infeed_ops=infeed_ops, sharded_infeed_tensors=shard_infeed_tensors)\n\n\ndef _inject_tpu_inputs_for_dataset(tpu_assignment, mode,\n                                   input_specs, get_next_ops):\n  \"\"\"Append core information to the set of dataset inputs.\"\"\"\n  # This is used during compilation to identify the current TPU core and enable\n  # concatenation operations across cores.\n  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:\n    return input_specs, get_next_ops\n\n  # Dataset inputs operate on a per-core basis.\n  per_core_batch_size = input_specs[0].shape.as_list()[0]\n\n  # Insert, at head, the tensor for core_id.\n  assert len(get_next_ops) == tpu_assignment.num_towers\n  for i in range(tpu_assignment.num_towers):\n    core_id_constant = constant_op.constant(\n        np.array([i] * per_core_batch_size).astype('int32'),\n        dtype=dtypes.int32,\n        name='core_id_constant')\n    get_next_ops[i] = [core_id_constant] + list(get_next_ops[i])\n\n  # Insert the input spec at head also.\n  input_specs = [tensor_spec.TensorSpec([per_core_batch_size], dtypes.int32)\n                ] + input_specs\n\n  return input_specs, get_next_ops\n\n\ndef _inject_tpu_inputs_for_infeed(tpu_assignment, mode,\n                                  core_id_place_holder, input_tensors, inputs):\n  \"\"\"Append core information to the set of inputs.\"\"\"\n  # This is used during compilation to identify the current TPU core and enable\n  # concatenation operations across cores.\n  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:\n    return input_tensors, inputs\n\n  # Puts a placeholder in the input spec.\n  input_tensors = [core_id_place_holder] + input_tensors\n\n  # Now fill the core id. For `num_cores` = 2, `batch_size` = 8, we fill the\n  # core id inputs as [0, 0, 0, 0, 1, 1, 1, 1], so each core sees its core id\n  # (duplicated).\n  num_cores = tpu_assignment.num_towers\n  per_core_batch_size = inputs[0].shape[0] // num_cores\n  core_ids = np.arange(num_cores).repeat(per_core_batch_size)\n  inputs = [core_ids] + inputs\n  return input_tensors, inputs\n\n\ndef _read_tpu_coreid_from_infeed(mode, infeed_tensors):\n  \"\"\"Pops the core ids out of the infeed.\"\"\"\n  if mode not in [model_fn_lib.ModeKeys.TRAIN, model_fn_lib.ModeKeys.EVAL]:\n    return None, infeed_tensors\n\n  if len(infeed_tensors) <= 1:\n    raise RuntimeError(\n        'The infeed on the TPU core has only {} tensors. '\n        'This is not expected. 
Please report a bug.\\nTensors: {}'.format(\n len(infeed_tensors), infeed_tensors))\n\n core_id = infeed_tensors[0][0] # Pop out the scalar version.\n rest = infeed_tensors[1:]\n return core_id, rest\n\n\nclass TPUFunction(object):\n \"\"\"K.function compatible interface for invoking a TPU compiled function.\n\n Recompilation is triggered on-demand for each set of new inputs shapes: the\n results are cached for future execution. We expect most computations will\n be dominated by a standard batch-size, followed by a straggler batch for\n the end of training or evaluation.\n\n All `inputs` and `outputs` will be loaded via the infeed and outfeed queues\n instead of being injected as `feed_dict` items or fetches.\n \"\"\"\n\n def __init__(self, model, execution_mode, tpu_assignment):\n self.model = model\n self.execution_mode = execution_mode\n self._tpu_assignment = tpu_assignment\n self._compilation_cache = {}\n self._cloned_model = None\n self._cloned_optimizer = None\n # Create a placeholder for the TPU core ID. Cache the placeholder to avoid\n # modifying the graph for every batch.\n self._core_id_place_holder = array_ops.placeholder(\n dtype=dtypes.int32, shape=[1], name='core_id')\n\n def _specialize_model(self, input_specs, infeed_manager):\n \"\"\"Specialize `self.model` (a Keras model) for the given input shapes.\"\"\"\n # Re-create our input and output layers inside our subgraph. They will be\n # attached to the true computation when we clone our model in `tpu_fn`.\n K.set_learning_phase(self.execution_mode == model_fn_lib.ModeKeys.TRAIN)\n\n # functools.partial and callable objects are not supported by tpu.rewrite\n def _model_fn():\n \"\"\"Compute fit/eval/predict for the TPU.\"\"\"\n is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN\n is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL\n is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT\n\n # During train/eval, we infeed our features as well as labels.\n if is_training or is_test:\n infeed_layers = self.model._input_layers + self.model._output_layers\n else:\n infeed_layers = self.model._input_layers\n\n # Generate our infeed operation to read features & labels.\n infeed_tensors = tpu_ops.infeed_dequeue_tuple(\n dtypes=[spec.dtype for spec in input_specs],\n shapes=[spec.shape for spec in input_specs],\n name='infeed-%s' % self.execution_mode)\n\n core_id, infeed_tensors = (\n _read_tpu_coreid_from_infeed(\n mode=self.execution_mode, infeed_tensors=infeed_tensors))\n\n assert len(infeed_tensors) == len(infeed_layers), (\n 'Infeed inputs did not match model: %s vs %s' % (infeed_layers,\n infeed_tensors))\n\n tpu_targets = []\n tpu_input_map = {}\n\n # Sort infeed outputs into inputs and labels for calling our Keras model.\n for tensor, layer in zip(infeed_tensors, infeed_layers):\n if layer in self.model._input_layers:\n tpu_input_map[layer.name] = tensor\n if layer in self.model._output_layers:\n tpu_targets.append(tensor)\n\n # Clone our CPU model, running within the TPU device context.\n #\n # We use the id of the original model as a key to avoid weight collisions\n # (if a user re-runs the same model multiple times, in e.g. 
Colab).\n      with TPURewriteContext(tpu_input_map):\n        with variable_scope.variable_scope('tpu_%s' % id(self.model)):\n          with keras_tpu_variables.replicated_scope(\n              self._tpu_assignment.num_towers):\n            if not self._cloned_optimizer:\n              self._cloned_optimizer = _clone_optimizer(\n                  self.model.cpu_optimizer,\n                  worker_name=self._tpu_assignment.worker_name)\n\n            self._cloned_model = models.clone_model(self.model)\n\n            # When running on more than one core, concatenate outputs at the end\n            # of processing. In the backprop stage, the gradients will be\n            # calculated according to the local inputs, as the gradient of\n            # cross-replica-concat is zero for any outputs other than those\n            # from the local core, so the loss calculation is identical.\n            num_towers = self.model._tpu_assignment.num_towers\n            if num_towers > 1 and (is_training or is_test):\n              new_outputs = [\n                  _cross_replica_concat(\n                      o, core_id, num_towers,\n                      name='model output ({})'.format(o.name))\n                  for o in self._cloned_model.outputs\n              ]\n              # Recast all low precision outputs back to float32 since we only\n              # cast the inputs to bfloat16 and not targets. This is done so\n              # that we can preserve precision when calculating the loss value.\n              if new_outputs and new_outputs[0].dtype == dtypes.bfloat16:\n                new_outputs = [\n                    math_ops.cast(o, dtypes.float32) for o in new_outputs]\n              self._cloned_model.outputs = new_outputs\n              tpu_targets = [\n                  _cross_replica_concat(\n                      tensor,\n                      core_id,\n                      num_towers,\n                      name='model target ({})'.format(tensor.name))\n                  for tensor in tpu_targets\n              ]\n\n            if is_training or is_test:\n              with variable_scope.variable_scope(\n                  'metrics', reuse=variable_scope.AUTO_REUSE):\n                self._cloned_model.compile(\n                    optimizer=_replicated_optimizer(self._cloned_optimizer),\n                    loss=self.model.loss,\n                    loss_weights=self.model.loss_weights,\n                    metrics=metrics_module.clone_metrics(self.model.metrics),\n                    weighted_metrics=metrics_module.clone_metrics(\n                        self.model.weighted_metrics),\n                    target_tensors=tpu_targets,\n                )\n\n      # Compute our outfeed depending on the execution mode\n      if is_training:\n        if not isinstance(self._cloned_optimizer, keras_optimizers.TFOptimizer):\n          # For a Keras optimizer, we try to place the variable weights on the\n          # TPU device. Keras creates optimizer variables (e.g. 
momentum values for\n          # the Momentum optimizer) when _make_train_function is invoked.\n          with keras_tpu_variables.replicated_variable_for_optimizer(\n              self._tpu_assignment.num_towers):\n            self._cloned_model._make_fit_function()\n        else:\n          self._cloned_model._make_fit_function()\n\n        self._outfeed_spec = [\n            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)\n            for tensor in self._cloned_model._fit_function.outputs\n        ]\n        return [\n            self._cloned_model._fit_function.updates_op,\n            tpu_ops.outfeed_enqueue_tuple(\n                self._cloned_model._fit_function.outputs,\n                name='outfeed-enqueue-train')\n        ]\n      elif is_test:\n        self._cloned_model._make_eval_function()\n        self._outfeed_spec = [\n            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)\n            for tensor in self._cloned_model._eval_function.outputs\n        ]\n        return [\n            tpu_ops.outfeed_enqueue_tuple(\n                self._cloned_model._eval_function.outputs,\n                name='outfeed-enqueue-test')\n        ]\n      elif is_predict:\n        self._cloned_model._make_predict_function()\n        self._outfeed_spec = [\n            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)\n            for tensor in self._cloned_model.predict_function.outputs\n        ]\n        return [\n            tpu_ops.outfeed_enqueue_tuple(\n                self._cloned_model.predict_function.outputs,\n                name='outfeed-enqueue-predict',\n            )\n        ]\n      else:\n        assert False, 'Unexpected execution mode: %s' % self.execution_mode\n\n    # Capture outfeed metadata computed during the rewrite.\n    self._outfeed_spec = None\n\n    # Generate our TPU operations using `tpu.split_compile_and_replicate`.\n    # `compile_op` can be used to test the TPU model compiles before execution.\n    # `execute_op` replicates `_model_fn` `num_replicas` times, with each shard\n    # running on a different logical core.\n    compile_op, execute_op = tpu.split_compile_and_replicate(\n        _model_fn, inputs=[[] for _ in range(self._tpu_assignment.num_towers)])\n\n    # Generate CPU side operations to enqueue features/labels and dequeue\n    # outputs from the model call.\n    sized_infeed = infeed_manager.build_infeed_from_input_specs(\n        input_specs, self.execution_mode)\n    # Build output ops.\n    outfeed_op = []\n    for shard_id in range(self._tpu_assignment.num_towers):\n      with ops.device(\n          '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):\n        outfeed_op.extend(\n            tpu_ops.outfeed_dequeue_tuple(\n                dtypes=[spec.dtype for spec in self._outfeed_spec],\n                shapes=[spec.shape for spec in self._outfeed_spec],\n                name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id),\n                device_ordinal=shard_id))\n\n    return TPUModelOp(\n        compile_op,\n        execute_op,\n        infeed_tensors=sized_infeed.sharded_infeed_tensors,\n        infeed_op=sized_infeed.infeed_ops,\n        outfeed_op=outfeed_op)\n\n  def _test_model_compiles(self, tpu_model_ops):\n    \"\"\"Verifies that the given TPUModelOp can be compiled via XLA.\"\"\"\n    logging.info('Started compiling')\n    start_time = time.time()\n\n    result = K.get_session().run(tpu_model_ops.compile_op)\n    proto = tpu_compilation_result.CompilationResultProto()\n    proto.ParseFromString(result)\n    if proto.status_error_message:\n      raise RuntimeError('Compilation failed: {}'.format(\n          proto.status_error_message))\n\n    end_time = time.time()\n    logging.info('Finished compiling. Time elapsed: %s secs',\n                 end_time - start_time)\n\n  def _lookup_infeed_manager(self, inputs):\n    \"\"\"Return an existing manager, or construct a new InfeedManager for inputs.\n\n    _lookup_infeed_manager will return an existing InfeedManager if one has been\n    previously assigned for this model and input. 
If not, it will construct a\n new TPUNumpyInfeedManager.\n\n Args:\n inputs: A NumPy input to the model.\n\n Returns:\n A `TPUInfeedManager` object to manage infeeds for this input.\n \"\"\"\n if inputs is None:\n return None\n\n for x, mgr in self.model._numpy_to_infeed_manager_list:\n if inputs[0] is x:\n return mgr\n\n return TPUNumpyInfeedManager(self.model._tpu_assignment)\n\n def _tpu_model_ops_for_input_specs(self, input_specs, infeed_manager):\n \"\"\"Looks up the corresponding `TPUModelOp` for a given `input_specs`.\n\n It instantiates a new copy of the model for each unique input shape.\n\n Args:\n input_specs: The specification of the inputs to train on.\n infeed_manager: The infeed manager responsible for feeding in data.\n\n Returns:\n A `TPUModelOp` instance that can be used to execute a step of the model.\n \"\"\"\n if input_specs is None or infeed_manager is None:\n # Note: this condition is possible during the prologue or epilogue of the\n # pipelined loop.\n return None\n\n # XLA requires every operation in the graph has a fixed shape. To\n # handle varying batch sizes we recompile a new sub-graph for each\n # unique input shape.\n shape_key = tuple([tuple(spec.shape.as_list()) for spec in input_specs])\n if shape_key not in self._compilation_cache:\n logging.info(\n 'New input shapes; (re-)compiling: mode=%s '\n '(# of cores %d), %s', self.execution_mode,\n self._tpu_assignment.num_towers, input_specs)\n new_tpu_model_ops = self._specialize_model(input_specs,\n infeed_manager)\n self._compilation_cache[shape_key] = new_tpu_model_ops\n self._test_model_compiles(new_tpu_model_ops)\n\n return self._compilation_cache[shape_key]\n\n def _construct_input_tensors_and_inputs(self, inputs):\n \"\"\"Returns input tensors and numpy array inputs corresponding to `inputs`.\n\n Args:\n inputs: NumPy inputs.\n\n Returns:\n A tuple of `input_tensors`, and `inputs`.\n \"\"\"\n if inputs is None:\n # Note: this condition is possible during the prologue or epilogue of the\n # pipelined loop.\n return None, None\n\n if not isinstance(K.learning_phase(), int):\n # Remove the learning_phase flag at the end. We currently hard code the\n # learning_phase in TPUFunction.\n assert isinstance(inputs[-1], int), (\n 'Expect the final element be learning_phase flag. 
Got {}'.format(\n inputs[-1]))\n inputs = inputs[:-1]\n\n if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or\n self.execution_mode == model_fn_lib.ModeKeys.EVAL):\n # Strip sample weight from inputs.\n input_tensors = self.model._feed_inputs + self.model._feed_targets\n else:\n input_tensors = self.model._feed_inputs\n\n inputs = inputs[:len(input_tensors)]\n input_tensors, inputs = (\n _inject_tpu_inputs_for_infeed(\n self._tpu_assignment, self.execution_mode,\n self._core_id_place_holder, input_tensors, inputs))\n return input_tensors, inputs\n\n def _process_outputs(self, outfeed_outputs):\n \"\"\"Processes the outputs of a model function execution.\n\n Args:\n outfeed_outputs: The sharded outputs of the TPU computation.\n\n Returns:\n The aggregated outputs of the TPU computation to be used in the rest of\n the model execution.\n \"\"\"\n # TODO(xiejw): Decide how to reduce outputs, or discard all but first.\n if self.execution_mode == model_fn_lib.ModeKeys.PREDICT:\n outputs = [[] for _ in range(len(self._outfeed_spec))]\n outputs_per_replica = len(self._outfeed_spec)\n\n for i in range(self._tpu_assignment.num_towers):\n output_group = outfeed_outputs[i * outputs_per_replica:(i + 1) *\n outputs_per_replica]\n for j in range(outputs_per_replica):\n outputs[j].append(output_group[j])\n\n return [np.concatenate(group) for group in outputs]\n else:\n return outfeed_outputs[:len(outfeed_outputs) //\n self._tpu_assignment.num_towers]\n\n def __call__(self, inputs):\n \"\"\"__call__ executes the function on the computational hardware.\n\n It handles executing infeed, and preprocessing in addition to executing the\n model on the TPU hardware.\n\n Note: `__call__` has a sibling method `pipeline_run` which performs the same\n operations, but with software pipelining.\n\n Args:\n inputs: The inputs to use to train.\n\n Returns:\n The output of the computation for the given mode it is executed in.\n\n Raises:\n RuntimeError: If there is an inappropriate use of the function.\n \"\"\"\n assert isinstance(inputs, list)\n\n infeed_manager = self._lookup_infeed_manager(inputs)\n input_tensors, inputs = self._construct_input_tensors_and_inputs(inputs)\n infeed_instance = infeed_manager.make_infeed_instance(inputs)\n del inputs # To avoid accident usage.\n input_specs = infeed_instance.make_input_specs(input_tensors)\n tpu_model_ops = self._tpu_model_ops_for_input_specs(input_specs,\n infeed_manager)\n infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)\n\n # Initialize our TPU weights on the first compile.\n self.model._initialize_weights(self._cloned_model)\n\n _, _, outfeed_outputs = K.get_session().run([\n tpu_model_ops.infeed_op, tpu_model_ops.execute_op,\n tpu_model_ops.outfeed_op\n ], infeed_dict)\n return self._process_outputs(outfeed_outputs)\n\n def pipeline_run(self, cur_step_inputs, next_step_inputs):\n \"\"\"pipeline_run executes the function on the computational hardware.\n\n pipeline_run performs the same computation as __call__, however it runs the\n infeed in a software pipelined fashion compared to the on-device execution.\n\n Note: it is the responsibility of the caller to call `pipeline_run` in the\n following sequence:\n - Once with `cur_step_inputs=None` and `next_step_inputs=list(...)`\n - `n` times with `cur_step_inputs` and `next_step_inputs` as `list`s\n - Once with `cur_step_inputs=list(...)` and `next_step_inputs=None`\n Additionally, it is the responsibility of the caller to pass\n `next_step_inputs` as `cur_step_inputs` on the next invocation of\n 
`pipeline_run`.\n\n Args:\n cur_step_inputs: The current step's inputs.\n next_step_inputs: The next step's inputs.\n\n Returns:\n The output of the computation for the given mode it is executed in.\n\n Raises:\n RuntimeError: If there is an inappropriate use of the function.\n \"\"\"\n # Software pipelined case.\n next_step_infeed_manager = self._lookup_infeed_manager(next_step_inputs)\n cur_step_infeed_manager = self._lookup_infeed_manager(cur_step_inputs)\n\n if (next_step_infeed_manager is not None and\n cur_step_infeed_manager is not None):\n assert type(next_step_infeed_manager) is type(cur_step_infeed_manager)\n\n next_input_tensors, next_step_inputs = (\n self._construct_input_tensors_and_inputs(next_step_inputs))\n cur_input_tensors, cur_step_inputs = (\n self._construct_input_tensors_and_inputs(cur_step_inputs))\n\n cur_infeed_instance = None\n if cur_step_infeed_manager:\n cur_infeed_instance = cur_step_infeed_manager.make_infeed_instance(\n cur_step_inputs)\n next_infeed_instance = None\n if next_step_infeed_manager:\n next_infeed_instance = next_step_infeed_manager.make_infeed_instance(\n next_step_inputs)\n\n del cur_step_inputs # Avoid accidental re-use.\n del next_step_inputs # Avoid accidental re-use.\n\n cur_tpu_model_ops = None\n next_tpu_model_ops = None\n infeed_dict = None\n\n if cur_infeed_instance and cur_input_tensors and cur_step_infeed_manager:\n cur_input_specs = cur_infeed_instance.make_input_specs(cur_input_tensors)\n cur_tpu_model_ops = self._tpu_model_ops_for_input_specs(\n cur_input_specs, cur_step_infeed_manager)\n\n if (next_infeed_instance and next_input_tensors and\n next_step_infeed_manager):\n next_input_specs = next_infeed_instance.make_input_specs(\n next_input_tensors)\n next_tpu_model_ops = self._tpu_model_ops_for_input_specs(\n next_input_specs, next_step_infeed_manager)\n infeed_dict = next_infeed_instance.make_feed_dict(next_tpu_model_ops)\n\n # Initialize our TPU weights on the first compile.\n self.model._initialize_weights(self._cloned_model)\n\n if next_tpu_model_ops and cur_tpu_model_ops:\n _, _, outfeed_outputs = K.get_session().run([\n next_tpu_model_ops.infeed_op, cur_tpu_model_ops.execute_op,\n cur_tpu_model_ops.outfeed_op\n ], infeed_dict)\n return self._process_outputs(outfeed_outputs)\n\n if cur_tpu_model_ops:\n _, outfeed_outputs = K.get_session().run(\n [cur_tpu_model_ops.execute_op, cur_tpu_model_ops.outfeed_op])\n return self._process_outputs(outfeed_outputs)\n\n if next_tpu_model_ops:\n K.get_session().run(next_tpu_model_ops.infeed_op, infeed_dict)\n return None\n raise RuntimeError('Internal error: both current & next tpu_model_ops '\n 'were None')\n\n\nclass KerasTPUModel(models.Model):\n \"\"\"TPU compatible Keras model wrapper.\"\"\"\n\n def __init__(self, cpu_model, strategy):\n super(models.Model, self).__init__( # pylint: disable=bad-super-call\n inputs=cpu_model.inputs,\n outputs=cpu_model.outputs,\n name=cpu_model.name,\n )\n\n # Create a mapping from numpy arrays to infeed managers.\n # Note: uses a list of tuples instead of a map because numpy arrays are\n # not hashable.\n self._numpy_to_infeed_manager_list = []\n\n self.predict_function = None\n self.test_function = None\n self.train_function = None\n self._fit_function = None\n self._eval_function = None\n\n cluster_resolver = strategy._tpu_cluster_resolver\n self._tpu_name_or_address = cluster_resolver.get_master()\n self._cpu_model = cpu_model\n self._tpu_assignment = strategy._make_assignment_for_model(cpu_model)\n self._tpu_model = None\n 
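# Tracks whether the CPU-side weights have been copied to the TPU clone;\n    # the copy happens lazily, on the first executed TPUFunction call (see\n    # `_initialize_weights`).\n    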
self._tpu_weights_initialized = False\n\n # If the input CPU model has already been compiled, compile our TPU model\n # immediately.\n if self._cpu_model.optimizer:\n self.compile(\n self._cpu_model.optimizer,\n self._cpu_model.loss,\n self._cpu_model.metrics,\n self._cpu_model.loss_weights,\n self._cpu_model.sample_weight_mode,\n self._cpu_model.weighted_metrics,\n self._cpu_model.target_tensors,\n )\n\n # This flag must be disabled upon model mutation, such as changing the model\n # layers or recompiling the model to use a different optimizer. New function\n # definitions are generated whenever this flag is disabled, ensuring that\n # internal graph functions are always using the current model structure.\n #\n # Requires declaration here because this constructor skips the\n # Model constructor.\n self._built_graph_functions = False\n\n def get_config(self):\n return {\n 'cpu_model': self._cpu_model,\n 'tpu_name_or_address': self._tpu_name_or_address,\n 'tpu_assignment': self._tpu_assignment,\n }\n\n def compile(self,\n optimizer,\n loss=None,\n metrics=None,\n loss_weights=None,\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None,\n **kwargs):\n if sample_weight_mode:\n raise ValueError('sample_weight_mode not supported for TPU execution.')\n if weighted_metrics:\n raise ValueError('weighted_metrics not supported for TPU execution.')\n if target_tensors:\n raise ValueError('target_tensors is not supported for TPU execution.')\n\n self._cpu_model.compile(\n _clone_optimizer(optimizer), loss,\n metrics_module.clone_metrics(metrics), loss_weights, sample_weight_mode,\n metrics_module.clone_metrics(weighted_metrics), target_tensors,\n **kwargs)\n\n super(KerasTPUModel, self).compile(optimizer, loss, metrics, loss_weights,\n sample_weight_mode, weighted_metrics,\n target_tensors, **kwargs)\n\n def fit(self,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n **kwargs):\n if context.executing_eagerly():\n raise EnvironmentError('KerasTPUModel currently does not support eager '\n 'mode.')\n\n with _tpu_session_context():\n assert not self._numpy_to_infeed_manager_list # Ensure empty.\n\n infeed_managers = [] # Managers to clean up at the end of the fit call.\n if isinstance(x, dataset_ops.Dataset):\n # TODO(b/111413240): Support taking a tf.data.Dataset directly.\n raise ValueError(\n 'Taking a Dataset directly is not yet supported. Please '\n 'wrap your dataset construction code in a function and '\n 'pass that to fit instead. For examples, see: '\n 'https://github.com/tensorflow/tpu/tree/master/models/experimental'\n '/keras')\n if callable(x):\n with ops.device(\n '/job:%s/device:CPU:0' % self._tpu_assignment.worker_name):\n dataset = x()\n if steps_per_epoch is None:\n raise ValueError('When using tf.data as input to a model, you '\n 'should specify the steps_per_epoch argument.')\n if y is not None:\n raise ValueError('When using tf.data as input to a model, y must '\n 'be None')\n infeed_manager = TPUDatasetInfeedManager(\n dataset, self._tpu_assignment, model_fn_lib.ModeKeys.TRAIN)\n # Use dummy numpy inputs for the rest of Keras' shape checking. 
We\n # intercept them when building the model.\n x = infeed_manager.dummy_x\n y = infeed_manager.dummy_y\n infeed_managers.append((x, infeed_manager))\n\n if isinstance(validation_data, dataset_ops.Dataset):\n # TODO(b/111413240): Support taking a tf.data.Dataset directly.\n raise ValueError(\n 'Taking a Dataset directly is not yet supported. Please '\n 'wrap your dataset construction code in a function and '\n 'pass that to fit instead. For examples, see: '\n 'https://github.com/tensorflow/tpu/tree/master/models/experimental'\n '/keras')\n if callable(validation_data):\n dataset = validation_data()\n if validation_steps is None:\n raise ValueError('When using tf.data as validation for a model, you '\n 'should specify the validation_steps argument.')\n infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,\n model_fn_lib.ModeKeys.EVAL)\n # Use dummy numpy inputs for the rest of Keras' shape checking. We\n # intercept them when building the model.\n val_x = infeed_manager.dummy_x\n val_y = infeed_manager.dummy_y\n infeed_managers.append((val_x, infeed_manager))\n validation_data = (val_x, val_y)\n\n self._numpy_to_infeed_manager_list = infeed_managers\n try:\n pipeline = kwargs.get('_pipeline', True)\n if '_pipeline' in kwargs:\n kwargs.pop('_pipeline')\n if not pipeline:\n logging.info('Running non-pipelined training loop (`_pipeline=%s`).',\n pipeline)\n return super(KerasTPUModel, self).fit(\n x, y, batch_size, epochs, verbose, callbacks, validation_split,\n validation_data, shuffle, class_weight, sample_weight,\n initial_epoch, steps_per_epoch, validation_steps, **kwargs)\n return self._pipeline_fit(x, y, batch_size, epochs, verbose, callbacks,\n validation_split, validation_data, shuffle,\n class_weight, sample_weight, initial_epoch,\n steps_per_epoch, validation_steps, **kwargs)\n finally:\n self._numpy_to_infeed_manager_list = []\n\n def evaluate(self,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None):\n original_numpy_to_infeed_manager_list = []\n if self._numpy_to_infeed_manager_list:\n # evaluate call may be executed as callbacks during the training. In this\n # case, _numpy_to_infeed_manager_list is not empty, so save it for\n # recovery at the end of evaluate call.\n original_numpy_to_infeed_manager_list = self._numpy_to_infeed_manager_list\n self._numpy_to_infeed_manager_list = []\n\n with _tpu_session_context():\n # Managers to clean up at the end of the evaluate call.\n infeed_managers = []\n if isinstance(x, dataset_ops.Dataset):\n # TODO(b/111413240): Support taking a tf.data.Dataset directly.\n raise ValueError(\n 'Taking a Dataset directly is not yet supported. Please '\n 'wrap your dataset construction code in a function and '\n 'pass that to fit instead. For examples, see: '\n 'https://github.com/tensorflow/tpu/tree/master/models/experimental'\n '/keras')\n if callable(x):\n dataset = x()\n if steps is None:\n raise ValueError('When using tf.data as input to a model, you '\n 'should specify the steps argument.')\n if y is not None:\n raise ValueError('When using tf.data as input to a model, y must be '\n 'None')\n infeed_manager = TPUDatasetInfeedManager(dataset, self._tpu_assignment,\n model_fn_lib.ModeKeys.EVAL)\n # Use dummy numpy inputs for the rest of Keras' shape checking. 
We\n # intercept them when building the model.\n x = infeed_manager.dummy_x\n y = infeed_manager.dummy_y\n infeed_managers.append((x, infeed_manager))\n\n self._numpy_to_infeed_manager_list = infeed_managers\n try:\n return super(KerasTPUModel, self).evaluate(x, y, batch_size, verbose,\n sample_weight, steps)\n finally:\n self._numpy_to_infeed_manager_list = (\n original_numpy_to_infeed_manager_list)\n\n def _pipeline_fit(self, x, y, batch_size, epochs, verbose, callbacks,\n validation_split, validation_data, shuffle, class_weight,\n sample_weight, initial_epoch, steps_per_epoch,\n validation_steps, **kwargs):\n # Similar to super.fit(...), but modified to support software pipelining.\n\n # Backwards compatibility\n if batch_size is None and steps_per_epoch is None:\n batch_size = 32\n # Legacy support\n if 'nb_epoch' in kwargs:\n logging.warning('The `nb_epoch` argument in `fit` has been renamed '\n '`epochs`.')\n epochs = kwargs.pop('nb_epoch')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n # Validate and standardize user data\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps_per_epoch',\n steps=steps_per_epoch,\n validation_split=validation_split)\n\n # Prepare validation data\n val_x, val_y, val_sample_weights = self._prepare_validation_data(\n validation_data, validation_split, validation_steps, x, y,\n sample_weights, batch_size)\n return self._pipeline_fit_loop(\n x,\n y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n val_sample_weights=val_sample_weights,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n\n def _pipeline_fit_loop(self,\n inputs,\n targets,\n sample_weights,\n batch_size,\n epochs,\n verbose,\n callbacks,\n val_inputs,\n val_targets,\n val_sample_weights,\n shuffle,\n initial_epoch,\n steps_per_epoch,\n validation_steps):\n self._make_train_function()\n sample_weights = sample_weights or []\n val_sample_weights = val_sample_weights or []\n if not isinstance(K.learning_phase(), int):\n ins = inputs + targets + sample_weights + [1]\n else:\n ins = inputs + targets + sample_weights\n\n do_validation = False\n if val_inputs:\n do_validation = True\n if (steps_per_epoch is None and verbose and inputs and\n hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):\n print('Train on %d samples, validate on %d samples' %\n (inputs[0].shape[0], val_inputs[0].shape[0]))\n\n if validation_steps:\n do_validation = True\n if steps_per_epoch is None:\n raise ValueError('Can only use `validation_steps` when doing step-wise '\n 'training, i.e. 
`steps_per_epoch` must be set.')\n\n num_training_samples = training_utils.check_num_samples(\n ins, batch_size, steps_per_epoch, 'steps_per_epoch')\n count_mode = 'steps' if steps_per_epoch else 'samples'\n callbacks = cbks.configure_callbacks(\n callbacks,\n self,\n do_validation=do_validation,\n val_inputs=val_inputs,\n val_targets=val_targets,\n val_sample_weights=val_sample_weights,\n batch_size=batch_size,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n samples=num_training_samples,\n validation_steps=validation_steps,\n verbose=verbose,\n count_mode=count_mode)\n\n if num_training_samples is not None:\n index_array = np.arange(num_training_samples)\n\n # To prevent a slowdown, we find beforehand the arrays that need conversion.\n feed = self._feed_inputs + self._feed_targets + self._feed_sample_weights\n indices_for_conversion_to_dense = []\n for i in range(len(feed)):\n if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):\n indices_for_conversion_to_dense.append(i)\n\n callbacks.on_train_begin()\n for epoch in range(initial_epoch, epochs):\n # Reset stateful metrics\n for m in self.stateful_metric_functions:\n m.reset_states()\n # Update callbacks\n callbacks.on_epoch_begin(epoch)\n epoch_logs = {}\n if steps_per_epoch is not None:\n # Step-wise fit loop.\n self._pipeline_fit_loop_step_wise(\n ins=ins,\n callbacks=callbacks,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n do_validation=do_validation,\n val_inputs=val_inputs,\n val_targets=val_targets,\n val_sample_weights=val_sample_weights,\n validation_steps=validation_steps,\n epoch_logs=epoch_logs)\n else:\n # Sample-wise fit loop.\n self._pipeline_fit_loop_sample_wise(\n ins=ins,\n callbacks=callbacks,\n index_array=index_array,\n shuffle=shuffle,\n batch_size=batch_size,\n num_training_samples=num_training_samples,\n indices_for_conversion_to_dense=indices_for_conversion_to_dense,\n do_validation=do_validation,\n val_inputs=val_inputs,\n val_targets=val_targets,\n val_sample_weights=val_sample_weights,\n validation_steps=validation_steps,\n epoch_logs=epoch_logs)\n\n callbacks.on_epoch_end(epoch, epoch_logs)\n if callbacks.model.stop_training:\n break\n callbacks.on_train_end()\n return self.history\n\n def _pipeline_fit_loop_sample_wise(self,\n ins,\n callbacks,\n index_array,\n shuffle,\n batch_size,\n num_training_samples,\n indices_for_conversion_to_dense,\n do_validation,\n val_inputs,\n val_targets,\n val_sample_weights,\n validation_steps,\n epoch_logs):\n f = self.train_function\n if shuffle == 'batch':\n index_array = training_utils.batch_shuffle(index_array, batch_size)\n elif shuffle:\n np.random.shuffle(index_array)\n batches = make_batches(num_training_samples, batch_size)\n\n ins_last_batch = None\n last_batch_logs = None\n batch_index = 0\n\n for batch_index, (batch_start, batch_end) in enumerate(batches):\n batch_ids = index_array[batch_start:batch_end]\n try:\n if isinstance(ins[-1], int):\n # Do not slice the training phase flag.\n ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]\n else:\n ins_batch = slice_arrays(ins, batch_ids)\n except TypeError:\n raise TypeError('TypeError while preparing batch. 
If using HDF5 '\n 'input data, pass shuffle=\"batch\".')\n\n # Pipeline batch logs\n next_batch_logs = {}\n next_batch_logs['batch'] = batch_index\n next_batch_logs['size'] = len(batch_ids)\n if batch_index > 0:\n # Callbacks operate one step behind in software pipeline.\n callbacks.on_batch_begin(batch_index - 1, last_batch_logs)\n for i in indices_for_conversion_to_dense:\n ins_batch[i] = ins_batch[i].toarray()\n\n outs = f.pipeline_run(\n cur_step_inputs=ins_last_batch, next_step_inputs=ins_batch)\n ins_last_batch = ins_batch\n\n if batch_index == 0:\n assert outs is None\n else:\n if not isinstance(outs, list):\n outs = [outs]\n for l, o in zip(self.metrics_names, outs):\n last_batch_logs[l] = o # pylint: disable=unsupported-assignment-operation\n callbacks.on_batch_end(batch_index - 1, last_batch_logs)\n if callbacks.model.stop_training:\n return\n last_batch_logs = next_batch_logs\n\n # Final batch\n callbacks.on_batch_begin(batch_index, last_batch_logs)\n outs = f.pipeline_run(cur_step_inputs=ins_last_batch, next_step_inputs=None)\n if not isinstance(outs, list):\n outs = [outs]\n for l, o in zip(self.metrics_names, outs):\n last_batch_logs[l] = o\n callbacks.on_batch_end(batch_index, last_batch_logs)\n if callbacks.model.stop_training:\n return\n\n if do_validation:\n val_outs = training_arrays.test_loop(\n self,\n val_inputs,\n val_targets,\n sample_weights=val_sample_weights,\n batch_size=batch_size,\n steps=validation_steps,\n verbose=0)\n if not isinstance(val_outs, list):\n val_outs = [val_outs]\n # Same labels assumed.\n for l, o in zip(self.metrics_names, val_outs):\n epoch_logs['val_' + l] = o\n\n def _pipeline_fit_loop_step_wise(self,\n ins,\n callbacks,\n steps_per_epoch,\n epochs,\n do_validation,\n val_inputs,\n val_targets,\n val_sample_weights,\n validation_steps,\n epoch_logs):\n f = self.train_function\n\n # Loop prologue\n try:\n outs = f.pipeline_run(cur_step_inputs=None, next_step_inputs=ins)\n assert outs is None # Function shouldn't return anything!\n except errors.OutOfRangeError:\n logging.warning('Your dataset iterator ran out of data on the first step '\n 'of the epoch, preventing further training. Check to '\n 'make sure your paths are correct and you have '\n 'permissions to read the files. Skipping validation')\n\n for step_index in range(steps_per_epoch):\n batch_logs = {'batch': step_index, 'size': 1}\n callbacks.on_batch_begin(step_index, batch_logs)\n try:\n if step_index < steps_per_epoch - 1:\n next_step_inputs = ins\n else:\n next_step_inputs = None\n outs = f.pipeline_run(\n cur_step_inputs=ins, next_step_inputs=next_step_inputs)\n except errors.OutOfRangeError:\n logging.warning('Your dataset iterator ran out of data; '\n 'interrupting training. Make sure that your '\n 'dataset can generate at least `steps_per_batch * '\n 'epochs` batches (in this case, %d batches). You '\n 'may need to use the repeat() function when '\n 'building your dataset.' 
% (steps_per_epoch * epochs))\n break\n\n if not isinstance(outs, list):\n outs = [outs]\n for l, o in zip(self.metrics_names, outs):\n batch_logs[l] = o\n\n callbacks.on_batch_end(step_index, batch_logs)\n if callbacks.model.stop_training:\n break\n\n if do_validation:\n val_outs = training_arrays.test_loop(\n self,\n val_inputs,\n val_targets,\n sample_weights=val_sample_weights,\n steps=validation_steps,\n verbose=0)\n if not isinstance(val_outs, list):\n val_outs = [val_outs]\n # Same labels assumed.\n for l, o in zip(self.metrics_names, val_outs):\n epoch_logs['val_' + l] = o\n\n def _prepare_validation_data(self, validation_data, validation_split,\n validation_steps, x, y, sample_weights,\n batch_size):\n \"\"\"Prepares the validation dataset.\n\n Args:\n validation_data: The validation data (if provided)\n validation_split: The validation split (if provided)\n validation_steps: The validation steps (if provided)\n x: The main training data x (if provided)\n y: The main training data y (if provided)\n sample_weights: The sample weights (if provided)\n batch_size: The training batch size (if provided)\n\n Returns:\n A 3-tuple of (val_x, val_y, val_sample_weights).\n\n Raises:\n ValueError: If the provided arguments are not compatible with\n `KerasTPUModel`.\n \"\"\"\n # Note: this is similar to a section of $tf/python/keras/engine/training.py\n # It differs in that tf.data objects are not allowed to be passed directly.\n # Additionally, it handles validating shapes & types appropriately for use\n # in TPUs.\n if validation_data:\n if (isinstance(validation_data, iterator_ops.Iterator) or\n isinstance(validation_data, iterator_ops.EagerIterator) or\n isinstance(validation_data, dataset_ops.Dataset)):\n raise ValueError('KerasTPUModel cannot handle a Dataset or Iterator '\n 'for validation_data. Please instead pass a function '\n 'that returns a `tf.data.Dataset`.')\n if len(validation_data) == 2:\n val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence\n val_sample_weight = None\n elif len(validation_data) == 3:\n val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence\n else:\n raise ValueError('When passing a `validation_data` argument, it must '\n 'contain either 2 items (x_val, y_val), or 3 items '\n '(x_val, y_val, val_sample_weights). However we '\n 'received `validation_data=%s`' % validation_data)\n val_x, val_y, val_sample_weights = self._standardize_user_data(\n val_x,\n val_y,\n sample_weight=val_sample_weight,\n batch_size=batch_size,\n steps=validation_steps)\n elif validation_split and 0. < validation_split < 1.:\n if training_utils.has_symbolic_tensors(x):\n raise ValueError('If your data is in the form of symbolic tensors, you '\n 'cannot use `validation_split`.')\n if hasattr(x[0], 'shape'):\n split_at = int(x[0].shape[0] * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. 
- validation_split))\n\n x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))\n y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))\n sample_weights, val_sample_weights = (\n slice_arrays(sample_weights, 0, split_at),\n slice_arrays(sample_weights, split_at)\n )\n elif validation_steps:\n val_x = []\n val_y = []\n val_sample_weights = []\n else:\n val_x = None\n val_y = None\n val_sample_weights = None\n\n return val_x, val_y, val_sample_weights\n\n def predict(self,\n x,\n batch_size=None,\n verbose=0,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False):\n with _tpu_session_context():\n return super(KerasTPUModel, self).predict(\n x,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing)\n\n @property\n def optimizer(self):\n if self._tpu_model:\n return self._tpu_model.optimizer\n return self._cpu_model.optimizer\n\n @optimizer.setter\n def optimizer(self, optimizer):\n self._optimizer = optimizer\n\n @property\n def stateful_metric_functions(self):\n if self._tpu_model:\n return self._tpu_model.stateful_metric_functions\n return self._stateful_metric_functions\n\n @stateful_metric_functions.setter\n def stateful_metric_functions(self, stateful_metric_functions):\n self._stateful_metric_functions = stateful_metric_functions\n\n def _make_train_function(self):\n if not self.train_function:\n self.train_function = TPUFunction(\n self,\n model_fn_lib.ModeKeys.TRAIN,\n tpu_assignment=self._tpu_assignment)\n\n return self.train_function\n\n def _make_test_function(self):\n if not self.test_function:\n self.test_function = TPUFunction(\n self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)\n return self.test_function\n\n def _make_fit_function(self):\n if not self._fit_function:\n self._fit_function = TPUFunction(\n self,\n model_fn_lib.ModeKeys.TRAIN,\n tpu_assignment=self._tpu_assignment)\n\n return self._fit_function\n\n def _make_eval_function(self):\n if not self._eval_function:\n self._eval_function = TPUFunction(\n self, model_fn_lib.ModeKeys.EVAL, tpu_assignment=self._tpu_assignment)\n return self._eval_function\n\n def _make_predict_function(self):\n if not self.predict_function:\n self.predict_function = TPUFunction(\n self,\n model_fn_lib.ModeKeys.PREDICT,\n tpu_assignment=self._tpu_assignment)\n return self.predict_function\n\n def _initialize_weights(self, cloned_model):\n \"\"\"Initialize TPU weights.\n\n This is called on the first compile of the TPU model (first call to\n fit/predict/evaluate).\n\n Args:\n cloned_model: `keras.Model`, TPU model to initialize.\n \"\"\"\n if self._tpu_weights_initialized:\n return\n\n self._tpu_model = cloned_model\n self._tpu_weights_initialized = True\n\n weights = self._cpu_model.get_weights()\n\n if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer):\n cpu_optimizer_config = {}\n else:\n cpu_optimizer_config = self.cpu_optimizer.get_config()\n\n logging.info('Setting weights on TPU model.')\n cloned_model.set_weights(weights)\n if self._tpu_model.optimizer is None:\n # tpu_model may not be compiled, e.g., loading weights and then predict.\n return\n for k, v in six.iteritems(cpu_optimizer_config):\n opt_var = getattr(self._tpu_model.optimizer, k)\n if isinstance(opt_var, variables.Variable):\n logging.info('CPU -> TPU %s: %s {%s}', k, v, K.get_value(opt_var))\n K.get_session().run(opt_var.assign(v))\n else:\n logging.warning('Cannot update non-variable config: 
%s', k)\n\n @property\n def cpu_optimizer(self):\n return self._cpu_model.optimizer\n\n def sync_to_cpu(self):\n \"\"\"Copy weights from the CPU, returning a synchronized CPU model.\"\"\"\n if not self._tpu_weights_initialized:\n return self._cpu_model\n\n logging.info('Copying TPU weights to the CPU')\n tpu_weights = self._tpu_model.get_weights()\n\n # TFOptimizers have no configurable options\n if isinstance(self.cpu_optimizer, keras_optimizers.TFOptimizer):\n tpu_optimizer_config = {}\n else:\n tpu_optimizer_config = self._tpu_model.optimizer.get_config()\n\n self._cpu_model.set_weights(tpu_weights)\n for k, v in six.iteritems(tpu_optimizer_config):\n logging.info('TPU -> CPU %s: %s', k, v)\n opt_var = getattr(self.cpu_optimizer, k)\n if isinstance(opt_var, variables.Variable):\n K.get_session().run(opt_var.assign(v))\n else:\n logging.warning('Cannot update non-variable config: %s', k)\n\n return self._cpu_model\n\n def get_weights(self):\n return self.sync_to_cpu().get_weights()\n\n def save_weights(self, *args, **kw):\n return self.sync_to_cpu().save_weights(*args, **kw)\n\n def save(self, *args, **kw):\n return self.sync_to_cpu().save(*args, **kw)\n\n def set_weights(self, weights):\n # We may not have a TPU model available if we haven't run fit/predict, so\n # we can't directly set the TPU weights here.\n # Instead, reset CPU model weights and force TPU re-initialization at the\n # next call.\n self._cpu_model.set_weights(weights)\n self._tpu_weights_initialized = False\n\n def load_weights(self, filepath, by_name=False):\n self._cpu_model.load_weights(filepath, by_name)\n self._tpu_weights_initialized = False\n\n\n# pylint: disable=bad-continuation\ndef _validate_shapes(model):\n \"\"\"Validate that all layers in `model` have constant shape.\"\"\"\n for layer in model.layers:\n if isinstance(layer.input_shape, tuple):\n input_shapes = [layer.input_shape]\n else:\n input_shapes = layer.input_shape\n\n if isinstance(layer.output_shape, tuple):\n output_shapes = [layer.output_shape]\n else:\n output_shapes = layer.output_shape\n\n for shape in input_shapes + output_shapes:\n for dim in shape[1:]:\n if dim is None:\n raise ValueError(\n \"\"\"\nLayer %(layer)s has a variable shape in a non-batch dimension. TPU models must\nhave constant shapes for all operations.\n\nYou may have to specify `input_length` for RNN/TimeDistributed layers.\n\nLayer: %(layer)s\nInput shape: %(input_shape)s\nOutput shape: %(output_shape)s\n \"\"\" % {\n 'layer': layer,\n 'input_shape': layer.input_shape,\n 'output_shape': layer.output_shape\n })\n\n\n# pylint: enable=bad-continuation\n\n\n@experimental\ndef tpu_model(model, strategy=None):\n \"\"\"Copy `model` along with weights to the TPU.\n\n Returns a TPU model.\n\n Usage:\n ```\n a = Input(shape=(32,))\n b = Dense(32)(a)\n model = Model(inputs=a, outputs=b)\n\n # If `num_cores_per_host` is greater than one, batch parallelism will be used\n # to run on multiple TPU cores.\n strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)\n model = keras_support.tpu_model(model, strategy)\n model.compile(\n optimizer=tf.train.GradientDescentOptimizer(learning_rate=1.0),\n ...)\n ```\n\n Args:\n model: A `tf.keras.Model` instance.\n strategy: `TPUDistributionStrategy`. The strategy to use for replicating\n model across multiple TPU cores.\n\n Returns:\n A new `KerasTPUModel` instance.\n \"\"\"\n _validate_shapes(model)\n # TODO(xiejw): Validate TPU model. TPUModel only?\n # TODO(xiejw): Validate replicas. Full or 1. 
Shall we allow subset?\n # TODO(xiejw): Adds reduction option.\n\n if strategy is None:\n strategy = TPUDistributionStrategy()\n else:\n if not isinstance(strategy, TPUDistributionStrategy):\n raise TypeError(\n '`strategy` must have type `tf.contrib.tpu.TPUDistributionStrategy`. '\n 'Got: {}'.format(type(strategy)))\n\n # If the model has already been initialized, grab the optimizer configuration\n # and model weights before entering the TPU session.\n if model.optimizer:\n if (isinstance(model.optimizer, keras_optimizers.Optimizer) and not\n isinstance(model.optimizer, keras_optimizers.TFOptimizer)):\n optimizer_config = model.optimizer.get_config()\n else:\n optimizer_config = None\n model_weights = model.get_weights()\n else:\n model_weights = None\n\n setup_tpu_session(strategy._tpu_cluster_resolver)\n\n # Force initialization of the CPU model in the TPU session.\n cpu_model = models.clone_model(model)\n if model.optimizer:\n cpu_model.compile(\n _clone_optimizer(model.optimizer, optimizer_config),\n model.loss,\n metrics_module.clone_metrics(model.metrics),\n model.loss_weights,\n model.sample_weight_mode,\n metrics_module.clone_metrics(model.weighted_metrics),\n )\n\n if model_weights:\n cpu_model.set_weights(model_weights)\n cpu_model.reset_states()\n\n return KerasTPUModel(cpu_model=cpu_model, strategy=strategy)\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ======================================\n\"\"\"Hook for asynchronous checkpointing.\n\nThis hook dispatches checkpoint writing operations in a separate thread to\nallow execution to continue on the main thread.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport threading\nimport time\n\nfrom tensorflow.core.util.event_pb2 import SessionLog\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.session_run_hook import SessionRunArgs\nfrom tensorflow.python.training.summary_io import SummaryWriterCache\n\n\nclass AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):\n \"\"\"Saves checkpoints every N steps or seconds.\"\"\"\n\n def __init__(self,\n checkpoint_dir,\n save_secs=None,\n save_steps=None,\n saver=None,\n checkpoint_basename=\"model.ckpt\",\n scaffold=None,\n listeners=None):\n \"\"\"Initializes a `CheckpointSaverHook`.\n\n Args:\n checkpoint_dir: `str`, base directory for the checkpoint files.\n save_secs: `int`, save every N secs.\n save_steps: `int`, save every N steps.\n saver: `Saver` object, used for saving.\n checkpoint_basename: `str`, base name for the checkpoint files.\n scaffold: `Scaffold`, use to get saver object.\n listeners: List of `CheckpointSaverListener` subclass instances. 
Used for\n callbacks that run immediately before or after this hook saves the\n checkpoint.\n\n Raises:\n ValueError: One of `save_steps` or `save_secs` should be set.\n ValueError: At most one of `saver` or `scaffold` should be set.\n \"\"\"\n logging.info(\"Create AsyncCheckpointSaverHook.\")\n if saver is not None and scaffold is not None:\n raise ValueError(\"You cannot provide both saver and scaffold.\")\n self._saver = saver\n self._save_thread = None\n self._write_graph_thread = None\n self._checkpoint_dir = checkpoint_dir\n self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)\n self._scaffold = scaffold\n self._timer = basic_session_run_hooks.SecondOrStepTimer(\n every_secs=save_secs, every_steps=save_steps)\n self._listeners = listeners or []\n self._steps_per_run = 1\n self._summary_writer = None\n self._global_step_tensor = None\n\n def _set_steps_per_run(self, steps_per_run):\n self._steps_per_run = steps_per_run\n\n def begin(self):\n self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)\n self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access\n if self._global_step_tensor is None:\n raise RuntimeError(\n \"Global step should be created to use CheckpointSaverHook.\")\n for l in self._listeners:\n l.begin()\n\n def after_create_session(self, session, coord):\n global_step = session.run(self._global_step_tensor)\n\n # We do write graph and saver_def at the first call of before_run.\n # We cannot do this in begin, since we let other hooks to change graph and\n # add variables in begin. Graph is finalized after all begin calls.\n def _write_graph_fn(self):\n training_util.write_graph(\n ops.get_default_graph().as_graph_def(add_shapes=True),\n self._checkpoint_dir, \"graph.pbtxt\")\n self._write_graph_thread = threading.Thread(target=_write_graph_fn,\n args=[self])\n self._write_graph_thread.start()\n\n saver_def = self._get_saver().saver_def if self._get_saver() else None\n graph = ops.get_default_graph()\n meta_graph_def = meta_graph.create_meta_graph_def(\n graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)\n self._summary_writer.add_graph(graph)\n self._summary_writer.add_meta_graph(meta_graph_def)\n # The checkpoint saved here is the state at step \"global_step\".\n self._save(session, global_step)\n self._timer.update_last_triggered_step(global_step)\n\n def before_run(self, run_context): # pylint: disable=unused-argument\n return SessionRunArgs(self._global_step_tensor)\n\n def after_run(self, run_context, run_values):\n global_step = run_context.session.run(self._global_step_tensor)\n if self._timer.should_trigger_for_step(global_step):\n self._timer.update_last_triggered_step(global_step)\n logging.info(\"Triggering checkpoint. 
%s\", global_step)\n if self._save(run_context.session, global_step):\n run_context.request_stop()\n\n def end(self, session):\n if self._save_thread:\n logging.info(\"Waiting for any pending checkpoints to finish.\")\n self._save_thread.join()\n if self._write_graph_thread:\n logging.info(\"Waiting for any pending write_graph to finish.\")\n self._write_graph_thread.join()\n\n last_step = session.run(self._global_step_tensor)\n\n # Save the last checkpoint synchronously if needed.\n if last_step != self._timer.last_triggered_step():\n self._save(session, last_step, asynchronous=False)\n\n for l in self._listeners:\n l.end(session, last_step)\n\n def _save(self, session, step, asynchronous=True):\n \"\"\"Saves the latest checkpoint, returns should_stop.\"\"\"\n\n # Skip saving on step 0\n if step == 0:\n return\n\n def _save_fn():\n \"\"\"Run the saver process.\"\"\"\n logging.info(\"Saving checkpoints for %d into %s.\", step, self._save_path)\n\n start_time = time.time()\n for l in self._listeners:\n l.before_save(session, step)\n\n self._get_saver().save(session, self._save_path, global_step=step)\n self._summary_writer.add_session_log(\n SessionLog(\n status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),\n step)\n\n for l in self._listeners:\n l.after_save(session, step)\n\n end_time = time.time()\n logging.info(\"Checkpoint actual writing time: (%.3f sec)\",\n end_time - start_time)\n logging.info(\"Checkpoint finished for %d into %s.\", step, self._save_path)\n\n if not asynchronous:\n _save_fn()\n return\n\n if self._save_thread is not None:\n self._save_thread.join(timeout=0.1)\n if self._save_thread.is_alive():\n logging.info(\"Saver thread still in progress, skipping checkpoint.\")\n return\n\n self._save_thread = threading.Thread(target=_save_fn)\n self._save_thread.start()\n\n def _get_saver(self):\n if self._saver is not None:\n return self._saver\n elif self._scaffold is not None:\n return self._scaffold.saver\n\n # Get saver from the SAVERS collection if present.\n collection_key = ops.GraphKeys.SAVERS\n savers = ops.get_collection(collection_key)\n if not savers:\n raise RuntimeError(\n \"No items in collection {}. Please add a saver to the collection \"\n \"or provide a saver or scaffold.\".format(collection_key))\n elif len(savers) > 1:\n raise RuntimeError(\n \"More than one item in collection {}. \"\n \"Please indicate which one to use by passing it to the constructor.\"\n .format(collection_key))\n\n self._saver = savers[0]\n return savers[0]\n" ]
[ [ "tensorflow.contrib.cluster_resolver.python.training.tpu_cluster_resolver.TPUClusterResolver", "tensorflow.contrib.tpu.python.tpu.tpu.rewrite", "tensorflow.contrib.tpu.python.ops.tpu_ops.infeed_dequeue_tuple", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.keras.backend.dtype", "numpy.concatenate", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.to_float", "tensorflow.python.framework.ops.device", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.keras.engine.training_utils.check_num_samples", "tensorflow.python.keras.metrics.clone_metrics", "tensorflow.python.ops.math_ops.tensordot", "tensorflow.python.keras.models.clone_model", "scipy.sparse.issparse", "numpy.arange", "tensorflow.contrib.tpu.python.ops.tpu_ops.outfeed_enqueue_tuple", "tensorflow.python.keras.engine.training_utils.has_symbolic_tensors", "tensorflow.contrib.tpu.python.ops.tpu_ops.cross_replica_sum", "tensorflow.contrib.tpu.python.tpu.keras_tpu_variables.replicated_scope", "tensorflow.contrib.tpu.proto.compilation_result_pb2.CompilationResultProto", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.keras.backend.get_session", "tensorflow.python.framework.ops.control_dependencies", "numpy.zeros", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.keras.utils.generic_utils.slice_arrays", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras.utils.generic_utils.make_batches", "tensorflow.python.keras.callbacks.configure_callbacks", "tensorflow.python.ops.array_ops.one_hot", "tensorflow.contrib.tpu.python.ops.tpu_ops.infeed_enqueue_tuple", "tensorflow.contrib.tpu.python.tpu.tpu_function.get_tpu_context", "tensorflow.python.client.session.Session", "tensorflow.python.keras.backend.get_value", "tensorflow.contrib.tpu.python.tpu.tpu.initialize_system", "tensorflow.contrib.tpu.python.tpu.keras_tpu_variables.replicated_variable_for_optimizer", "numpy.array", "tensorflow.contrib.tpu.python.tpu.tpu_system_metadata._query_tpu_system_metadata", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.keras.engine.training_utils.batch_shuffle", "numpy.random.shuffle", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.array_ops.reshape", "tensorflow.contrib.tpu.python.tpu.tpu_optimizer.CrossShardOptimizer", "tensorflow.contrib.tpu.python.ops.tpu_ops.outfeed_dequeue_tuple", "tensorflow.python.keras.optimizers.TFOptimizer", "tensorflow.python.keras.backend.set_session", "tensorflow.python.keras.backend.learning_phase", "tensorflow.python.keras.engine.training_arrays.test_loop", "tensorflow.python.keras.backend.is_sparse", "tensorflow.python.keras.backend.clear_session", "tensorflow.python.keras.backend.set_learning_phase", "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.training.summary_io.SummaryWriterCache.get", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.training.training_util._get_or_create_global_step_read", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.core.util.event_pb2.SessionLog", "tensorflow.python.training.basic_session_run_hooks.SecondOrStepTimer", "tensorflow.python.training.session_run_hook.SessionRunArgs" ] ]
z-vasconcelos/udacity_facial_keypoints
[ "96ef3b08714e224a3aed60154b133fdc595ff5ca" ]
[ "models.py" ]
[ "## TODO: define the convolutional neural network architecture\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# can use the below import should you choose to initialize the weights of your Net\nimport torch.nn.init as I\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n \n #ref https://arxiv.org/pdf/1710.00977.pdf\n #NaimishNet consists of 4 convolution2d layers,\n #4 maxpool-ing2d layers and 3 dense layers,\n \n ## TODO: Define all the layers of this CNN, the only requirements are:\n ## 1. This network takes in a square (same width and height), grayscale image as input\n ## 2. It ends with a linear layer that represents the keypoints\n ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs\n \n # As an example, you've been given a convolutional layer, which you may (but don't have to) change:\n # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel\n \n ## Note that among the layers to add, consider including:\n # maxpooling layers - V\n # multiple conv layers - V\n # fully-connected layers V\n ##and other layers (such as \n # dropout - V\n # or batch normalization -V\n ##to avoid overfitting\n \n #img 224x224\n #input image width/height, W, minus the filter size, F, divided by the stride, S, all + 1\n ## output size = (W-F)/S +1\n \n # Dropouts NaimishNet\n \n ## output size = (W-F)/S +1 = (224-5)/1 +1 = 220\n ## output size = (W-F)/S +1 = (110-5)/1 +1 = 53\n ## output size = (W-F)/S +1 = (53-3)/1 +1 = 25\n ## output size = (W-F)/S +1 = (25-3)/1 +1 = 11\n \n ## output size = (W-F)/S +1 = (224-5)/1 +1 = 220\n ## output size = (W-F)/S +1 = (110-3)/1 +1 = 54\n ## output size = (W-F)/S +1 = (54-3)/1 +1 = 26\n ## output size = (W-F)/S +1 = (26-2)/1 +1 = 12\n \n #Dropouts from 0.1 to 0.6 #changed\n #1\n self.conv1 = nn.Conv2d(1, 32, 5)\n self.pool = nn.MaxPool2d(2, 2) # 1st good res was without MaxPool here\n self.bn2 = nn.BatchNorm2d(32)\n self.fc1_drop = nn.Dropout(p=0.1)\n #2\n self.conv2 = nn.Conv2d(32, 64, 3)\n self.pool = nn.MaxPool2d(2, 2)\n self.bn2 = nn.BatchNorm2d(64)\n self.fc2_drop = nn.Dropout(p=0.2)\n #3\n self.conv3 = nn.Conv2d(64, 128, 3)\n self.pool = nn.MaxPool2d(2, 2)\n self.bn2 = nn.BatchNorm2d(128)\n self.fc3_drop = nn.Dropout(p=0.2) #from 0.3 to 0.2\n #4\n self.conv4 = nn.Conv2d(128, 256, 2)\n self.pool = nn.MaxPool2d(2, 2)\n self.bn2 = nn.BatchNorm2d(256)\n self.fc4_drop = nn.Dropout(p=0.3) #from 0.4 to 0.3\n \n #256*11*11 = 30976\n #256*12*12 = 36864\n self.fc1 = nn.Linear(36864, 1000)\n self.fc5_drop = nn.Dropout(p=0.5)\n \n self.fc2 = nn.Linear(1000, 1000)\n self.fc6_drop = nn.Dropout(p=0.6)\n \n # finally, create 136 output channels (for the 10 classes)\n self.fc3 = nn.Linear(1000, 136) \n \n def forward(self, x):\n ## TODO: Define the feedforward behavior of this model\n ## x is the input image and, as an example, here you may choose to include a pool/conv step:\n ## x = self.pool(F.relu(self.conv1(x)))\n \n # conv/relu + pool layers\n x = self.pool(F.relu(self.conv1(x)))\n x = self.fc1_drop(x) \n x = self.pool(F.relu(self.conv2(x)))\n x = self.fc2_drop(x) \n x = self.pool(F.relu(self.conv3(x)))\n x = self.fc3_drop(x) \n x = self.pool(F.relu(self.conv4(x)))\n x = self.fc4_drop(x) \n \n # prep for linear layer\n # this line of code is the equivalent of Flatten in Keras\n x = x.view(x.size(0), -1)\n \n # two linear layers with dropout in between\n x = F.relu(self.fc1(x))\n x = self.fc5_drop(x)\n x = 
F.relu(self.fc2(x))\n x = self.fc6_drop(x)\n x = self.fc3(x)\n \n # a modified x, having gone through all the layers of your model, should be returned\n return x\n" ]
[ [ "torch.nn.Dropout", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d" ] ]
wolaituodiban/toad
[ "9aa755273c8fc055c8a244f244e5437c189aa884" ]
[ "toad/nn/loss.py" ]
[ "from typing import Dict, List\n\nimport torch\nfrom torch.nn import Module\n\nfrom .functional import focal_loss\n\n\nclass FocalLoss(Module):\n def __init__(self, alpha = 1., gamma = 2., reduction = 'mean'):\n super(FocalLoss, self).__init__()\n \n self.alpha = alpha\n self.gamma = gamma\n self.reduction = reduction\n\n def forward(self, input, target):\n return focal_loss(\n input,\n target,\n alpha = self.alpha,\n gamma = self.gamma,\n reduction = self.reduction,\n )\n\n\nclass DictLoss(Module):\n def __init__(self, torch_loss, weights: Dict[str, float] = None):\n super(DictLoss, self).__init__()\n self.torch_loss = torch_loss\n self.weights = weights or {}\n\n def forward(self, input: Dict[str, torch.Tensor], target: Dict[str, torch.Tensor]):\n loss = 0\n weight_sum = 0\n for key, _target in target.items():\n if key not in input:\n continue\n weight = self.weights.get(key, 1)\n mask = torch.bitwise_not(torch.isnan(_target))\n _target = _target.to(input[key].device)\n loss += weight * self.torch_loss(input[key][mask], _target[mask])\n weight_sum += weight\n\n return loss / weight_sum\n\n\nclass ListLoss(Module):\n def __init__(self, torch_loss, weights: List[float] = None):\n super(ListLoss, self).__init__()\n self.torch_loss = torch_loss\n self.weights = weights\n\n def forward(self, input: List[torch.Tensor], target: List[torch.Tensor]):\n loss = 0\n weight_sum = 0\n for i, (_input, _target) in enumerate(zip(input, target)):\n if self.weights:\n weight = self.weights[i]\n else:\n weight = 1\n _target = _target.to(_input.device)\n mask = torch.bitwise_not(torch.isnan(_target))\n loss += weight * self.torch_loss(_input[mask], _target[mask])\n weight_sum += weight\n\n return loss / weight_sum\n" ]
[ [ "torch.isnan" ] ]
vic9527/ViClassifier
[ "fd6c4730e880f35a9429277a6025219315e067cc" ]
[ "converters/pt2wts.py" ]
[ "def convert(pth_path, wts_path, device_type='cuda'):\n import struct\n import torch\n from viclassifier.utils import dev_opt\n\n device = dev_opt.usingDevice(device_type)\n model = torch.load(pth_path, map_location=device)\n model.to(device)\n # 测试时不启用 BatchNormalization 和 Dropout\n model.eval()\n\n # print('model: ', model)\n # print('state dict: ', model.state_dict().keys())\n\n # # 生成数据测试\n # tmp = torch.ones(1, 3, 224, 224).to(device)\n # print('input: ', tmp)\n # out = model(tmp)\n # print('output:', out)\n\n f = open(wts_path, 'w')\n f.write(\"{}\\n\".format(len(model.state_dict().keys())))\n for k, v in model.state_dict().items():\n # print('key: ', k)\n # print('value: ', v.shape)\n vr = v.reshape(-1).cpu().numpy() #在CPU上执行\n f.write(\"{} {}\".format(k, len(vr)))\n print(\"{} {}\".format(k, len(vr)))\n for vv in vr:\n f.write(\" \")\n # print(\" \")\n f.write(struct.pack(\">f\", float(vv)).hex())\n # print(struct.pack(\">f\", float(vv)).hex())\n f.write(\"\\n\")\n # print(\"\\n\")\n\n\nif __name__ == \"__main__\":\n import os, sys\n\n viclassifier_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n print('viclassifier_dir:', viclassifier_dir)\n sys.path.append(viclassifier_dir)\n\n pth_path = r'../examples/model.pth'\n wts_path = r'../examples/model.wts'\n convert(pth_path, wts_path)\n" ]
[ [ "torch.load" ] ]
mattwachter/extract-diode-parameters
[ "4def6deef7c4937d7e1706809e68d7466ee97d7f" ]
[ "diode_equations.py" ]
[ "\"\"\" Physics equations describing diode behavior\n\"\"\"\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport scipy.constants as const\nfrom scipy.special import lambertw\n\nfrom diode_utils import *\n\n\ndef ideal_diode_eq(v_ca, i_s, m, T):\n \"\"\"Shockley Diode equation\n\n Args:\n v_ca (float): Cathode-Anode voltage [V].\n i_s_log (float): Saturation current [A].\n m (float): Ideality factor.\n T (float): Temperature [T].\n\n Returns:\n (float): Diode current I_C [A]\n \"\"\"\n i_c = i_s * (np.exp((v_ca * const.e)/(m * const.k * T)) - 1)\n return i_c\n\ndef get_ideal_diode_eq_log(T):\n \"\"\" Wrapper function for ideal_diode_eq_log.\n\n Needed for scipy.optimize.curve_fit()\n TODO: Clarify description! more elegant solution?\n \"\"\"\n def ideal_diode_eq_log_internal(v_ca, i_s_log, m):\n \"\"\"Log of Shockley Diode equation\n\n Log of exponential equation facilates curve fitting\n Shockley Diode equation:\n i_c = i_s * (exp(v_ca * const.e)/(m * const.k * T) - 1)\n For v_ca >> v_t = .(const.k * T)/const.e = 0.026V * T/300K:\n ic approxeq i_s * exp(v_ca / (v_t * m))\n log(ic) approxeq log(i_s) + v_ca / (v_t * m)\n\n TODO: handle Temperature! (automatically ?)\n Args:\n v_ca (float): Cathode-Anode voltage.\n i_s_log (float): Log of saturation current.\n m (float): Ideality factor.\n T (float): Temperature in Kelvin, defaults to 298.0\n\n Returns:\n i_c_log (float): logarithm of diode current\n \"\"\"\n i_c_log = i_s_log + v_ca * (const.e/(m * const.k * T))\n return i_c_log\n return ideal_diode_eq_log_internal\n\n\ndef ohmic_resistance_diode(v_ca, i_c, i_s, m, T=298.0):\n \"\"\"TODO: Does not work, delete??\n Args:\n v_ca (float): Cathode-Anode voltage.\n i_c (float): Diode current.\n i_s (float): Saturation current.\n m (float): Ideality factor (model parameter)\n T (float): Temperature in Kelvin, defaults to 298.0\n\n Returns:\n [type]: [description]\n \"\"\"\n r_D = (v_ca - np.log((i_c + i_s) / i_s) * const.k * T / const.e) / i_c\n return r_D\n\n\ndef ic_diode_ohmic(v_ca, i_s, m, T, r_S):\n \"\"\"Calculate current of an ideal diode in series with a resistor\n\n Args:\n v_ca (float): Cathode-Anode voltage [V].\n i_s (float): Saturation current [A].\n m (float): Ideality factor (model parameter)\n T (float): Temperature [K], defaults to 298.0\n r_S (float): Ohmic diode resistance\n\n Returns:\n i_c_a (float array): Diode current\n \"\"\"\n # Ideal diode and ohmic resistance in series:\n # ln((i_c+i_s)/i_s) * v_t + i_c*r_S -v_ca = 0\n # sympy.solve(log((x+a)/a)*b+c*x-d, x) = [(-a*c +\n # b*LambertW(a*c*exp((a*c + d)/b)/b))/c]\n v_t = (const.k * T *m) / const.e\n i_c = ((-i_s * r_S + v_t * lambertw(i_s*r_S * np.exp((i_s*r_S + v_ca)/v_t)/v_t))/r_S)\n i_c = np.real(i_c) # Avoid warning; imaginary part is already zero\n return i_c\n\n\ndef ideal_diode_model(v_ca_a , i_c_a , vca_lim_lower=0.65,\n vca_lim_upper=0.8, T=298.0):\n \"\"\"Calculate a best fit model for the Shockley Diode equation\n\n Args:\n v_ca_a (float array): Cathode-Anode voltage.\n i_c_a (float array): Diode current.\n vca_lim_lower (float, optional):\n Lower limit in Volt of range the model is based on.\n Defaults to 0.65.\n vca_lim_upper (float, optional): [description].\n Upper limit in Volt of range the model is based on.\n Defaults to 0.8.\n\n Returns:\n i_s (float): Saturation current (model parameter).\n m (float): Ideality factor (model parameter)\n \"\"\"\n v_ca_cropped, i_c_cropped = crop_data_range_to_x(v_ca_a , i_c_a ,\n vca_lim_lower, vca_lim_upper)\n\n log_vec = np.vectorize(np.log)\n diode_eq_T 
= get_ideal_diode_eq_log(T)\n p_opt, pcov = curve_fit(diode_eq_T, v_ca_cropped, log_vec(i_c_cropped))\n i_s = np.exp(p_opt[0]) # Result of ideal_diode_eq_log is log(i_s)\n m = p_opt[1]\n\n return (i_s, m)\n\n\ndef diode_capacitance_TT_eq(i_c, tt):\n \"\"\"Diode capacitance as function of diode current and transit time\n\n Diffusion capacitance is linearly dependent on diode current.\n Args:\n i_c (float: Diode current.\n tt (float): Transit time.\n\n Returns:\n (float): Diode capacitance.\n \"\"\"\n c_ca = tt * i_c\n return c_ca\n\n\ndef diode_capacitance_model(v_ca_a , i_c_a , c_ca_a , vca_lim_lower=0.65,\n vca_lim_upper=0.8):\n \"\"\"Calculate a best fit model for the diffusion capacitance\n\n Args:\n v_ca_a (float array): Cathode-Anode voltage.\n i_c_a (float array): Diode current.\n c_ca_a (float array): Diode capacitance.\n vca_lim_lower (float, optional):\n Lower limit in Volt of range the model is based on.\n Defaults to 0.65.\n vca_lim_upper (float, optional): [description].\n Upper limit in Volt of range the model is based on.\n Defaults to 0.8.\n\n Returns:\n tt (float): Transit time.\n \"\"\"\n v_ca_cropped, c_ca_cropped = crop_data_range_to_x(v_ca_a , c_ca_a ,\n vca_lim_lower, vca_lim_upper)\n v_ca_cropped, i_c_cropped = crop_data_range_to_x(v_ca_a , i_c_a ,\n vca_lim_lower, vca_lim_upper)\n # Map to log to reduce numerical error\n # log_vec = np.vectorize(np.log)\n # p_opt, pcov = curve_fit(diode_capacitance_TT_eq, log_vec(i_c_cropped),\n # log_vec(c_ca_cropped))\n p_opt, pcov = curve_fit(diode_capacitance_TT_eq, i_c_cropped,\n c_ca_cropped)\n tt = p_opt[0] # Transit time\n return tt\n\n\ndef diode_saturation_current(i_s_0, T):\n \"\"\"Temperature dependent saturation current.\n\n As described by Michael Schroter in the HICUM Model:\n https://www.iee.et.tu-dresden.de/iee/eb/forsch/Hicum_PD/HicumQ/hicumL2V2p4p0.va\n (line 1150) for an internal Base collector diode saturation current:\n ibcis_t = ibcis*exp(zetabci*ln_qtt0+vgc/VT*(qtt0-1))\n\n Args:\n i_s_0 (float): I_S [A] at T=300.15K.\n T ([type]): Temperature [K].\n\n Returns:\n float: I_S(T) [A]\n \"\"\"\n zetaci = 0.0 # Line 813\n # Line 824 \"Coefficient K1 in T-dependent band-gap equation\":\n f1vg = -1.02377e-4\n mg = 3 - const.e * f1vg / const.k\n zetabci = mg + 1 - zetaci # Line 1025\n vgc = 1.17 # Collector bandgap voltage silicon\n VT = (const.k * T) / const.e\n T_nom = 300.15 # Nominal temperature [K]\n qtt0 = T / T_nom\n ln_qtt0 = np.log(qtt0)\n # i_s = i_s_0 * np.exp(zetabci * ln_qtt0 + vgc/VT * (qtt0-1))\n # Original\n # i_s = i_s_0 * np.exp(zetabci * ln_qtt0) * np.exp(vgc/VT * (qtt0-1))\n # Simplified version gets better results:\n i_s = i_s_0 * np.exp(vgc/VT * (qtt0-1))\n return i_s\n\n\ndef diode_saturation_current_log(i_s_0_log, T, zeta):\n \"\"\"Log of temperature dependent saturation current.\n\n As described by Michael Schroter in the HICUM Model:\n https://www.iee.et.tu-dresden.de/iee/eb/forsch/Hicum_PD/HicumQ/hicumL2V2p4p0.va\n (line 1150) for an internal Base collector diode saturation current:\n ibcis_t = ibcis*exp(zetabci*ln_qtt0+vgc/VT*(qtt0-1))\n Args:\n i_s_0 (float): I_S [A] at T=300.15K.\n T ([type]): Temperature [K].\n\n Returns:\n float: I_S(T) [A]\n \"\"\"\n # zetaci = 0.0 # Line 813\n # # Line 824 \"Coefficient K1 in T-dependent band-gap equation\":\n # f1vg = -1.02377e-4\n # mg = 3 - const.e * f1vg / const.k\n # zetabci = mg + 1 - zetaci # Line 1025\n # vgc = 1.17 # Bandgap voltage silicon\n # VT = (const.k * T) / const.e\n # T_nom = 300.15 # Nominal temperature [K]\n # qtt0 = T / T_nom\n # 
ln_qtt0 = np.log(qtt0)\n # i_s = i_s_0 * np.exp(zetabci * ln_qtt0 + vgc/VT * (qtt0-1))\n i_s_log = i_s_0_log + zeta * np.log(T/300.15) + ((1.17*const.e) / (const.k*T)) * (T/300.15-1)\n return i_s_log\n\n\ndef diode_saturation_current_0(i_s, T):\n \"\"\"Saturation current at nominal temperature.\n\n As described by Michael Schroter in hicumL2V2p4p0_internal.va\n (line 1150) for an internal Base collector diode saturation current:\n ibcis_t = ibcis*exp(zetabci*ln_qtt0+vgc/VT*(qtt0-1))\n TODO Reacts too strongly to slight deviations in temperature\n (298.0K vs 300.15K)\n Args:\n i_s (float): I_S [A] at T.\n T ([type]): Temperature [K].\n\n Returns:\n float: I_S(T=300.15K) [A]\n \"\"\"\n zetaci = 0.0 # Line 813\n # Line 824 \"Coefficient K1 in T-dependent band-gap equation\":\n f1vg = -1.02377e-4\n mg = 3 - const.e * f1vg / const.k\n zetabci = mg + 1 - zetaci # Line 1025\n vgc = 1.17 # Bandgap voltage silicon\n VT = (const.k * 300) / const.e\n T_nom = 300.15 # Nominal temperature [K]\n qtt0 = T / T_nom\n ln_qtt0 = np.log(qtt0)\n # i_s = i_s_0 * np.exp(zetabci * ln_qtt0 + vgc/VT * (qtt0-1))\n exponent = vgc/VT * (qtt0-1)\n i_s_0 = i_s / np.exp(exponent) # Better results\n return i_s_0\n\n\ndef calc_rd_deriv_a(v_ca_a , i_c_a ):\n \"\"\"Resistance array as a derivative of voltage over current.\n\n TODO Check input\n Args:\n v_ca_a (float array): Cathode-Anode voltage [V].\n i_c_a (float array): Diode current.\n\n Returns:\n (float array): Differential resistance r_D [Ohm]\n \"\"\"\n r_D = np.gradient(v_ca_a , i_c_a )\n return r_D\n" ]
[ [ "numpy.log", "numpy.gradient", "numpy.real", "numpy.vectorize", "numpy.exp", "scipy.optimize.curve_fit" ] ]
Harshit4199/3D-imaging-Hippocampal-segmentation
[ "3ba8c0cf354395e4514fc84756b824be18e32eed" ]
[ "section_2/src/data_prep/SlicesDataset.py" ]
[ "\"\"\"\nModule for Pytorch dataset representations\n\"\"\"\n\nimport torch\nfrom torch.utils.data import Dataset\n\nclass SlicesDataset(Dataset):\n \"\"\"\n This class represents an indexable Torch dataset\n which could be consumed by the PyTorch DataLoader class\n \"\"\"\n def __init__(self, data):\n self.data = data\n\n self.slices = []\n\n for i, d in enumerate(data):\n for j in range(d[\"image\"].shape[0]):\n self.slices.append((i, j))\n\n def __getitem__(self, idx):\n \"\"\"\n This method is called by PyTorch DataLoader class to return a sample with id idx\n Arguments: \n idx {int} -- id of sample\n Returns:\n Dictionary of 2 Torch Tensors of dimensions [1, W, H]\n \"\"\"\n slc = self.slices[idx]\n sample = dict()\n sample[\"id\"] = idx\n\n # You could implement caching strategy here if dataset is too large to fit\n # in memory entirely\n # Also this would be the place to call transforms if data augmentation is used\n \n # TASK: Create two new keys in the \"sample\" dictionary, named \"image\" and \"seg\"\n # The values are 3D Torch Tensors with image and label data respectively. \n # First dimension is size 1, and last two hold the voxel data from the respective\n # slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors. \n # Your tensor needs to be of shape [1, patch_size, patch_size]\n # Don't forget that you need to put a Torch Tensor into your dictionary element's value\n # Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array\n # and the slice number are in the slc variable. \n # Hint2: You can use None notation like so: arr[None, :] to add size-1 \n # dimension to a Numpy array\n image_slice = self.data[slc[0]][\"image\"][slc[1]]\n sample[\"image\"] = torch.from_numpy(image_slice).type(torch.cuda.FloatTensor).unsqueeze(0)\n \n label_slice = self.data[slc[0]][\"seg\"][slc[1]]\n sample[\"seg\"] = torch.from_numpy(label_slice).type(torch.cuda.FloatTensor).unsqueeze(0)\n return sample\n\n def __len__(self):\n \"\"\"\n This method is called by PyTorch DataLoader class to return number of samples in the dataset\n Returns:\n int\n \"\"\"\n return len(self.slices)" ]
[ [ "torch.from_numpy" ] ]
sathishksankarpandi/ktrain
[ "bc9169661592ee14b95d9e56622dbb20e66b3568" ]
[ "ktrain/tests/test_textclassification.py" ]
[ "#!/usr/bin/env python3\n\"\"\"\nTests of ktrain text classification flows\n\"\"\"\nimport sys\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\";\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nsys.path.insert(0,'../..')\nimport IPython\nfrom unittest import TestCase, main, skip\nimport numpy as np\nimport ktrain\nfrom ktrain import text as txt\nTEST_DOC = 'god christ jesus mother mary church sunday lord heaven amen'\n\nclass TestTextClassification(TestCase):\n\n def setUp(self):\n # fetch the dataset using scikit-learn\n categories = ['alt.atheism', 'soc.religion.christian',\n 'comp.graphics', 'sci.med']\n from sklearn.datasets import fetch_20newsgroups\n train_b = fetch_20newsgroups(subset='train',\n categories=categories, shuffle=True, random_state=42)\n test_b = fetch_20newsgroups(subset='test',\n categories=categories, shuffle=True, random_state=42)\n print('size of training set: %s' % (len(train_b['data'])))\n print('size of validation set: %s' % (len(test_b['data'])))\n print('classes: %s' % (train_b.target_names))\n x_train = train_b.data\n y_train = train_b.target\n x_test = test_b.data\n y_test = test_b.target\n self.trn = (x_train, y_train)\n self.val = (x_test, y_test)\n self.classes = train_b.target_names\n\n\n\n def test_fasttext(self):\n trn, val, preproc = txt.texts_from_array(x_train=self.trn[0], \n y_train=self.trn[1],\n x_test=self.val[0], \n y_test=self.val[1],\n class_names=self.classes,\n preprocess_mode='standard',\n maxlen=350, \n max_features=35000)\n model = txt.text_classifier('fasttext', train_data=trn)\n learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)\n lr = 0.01\n hist = learner.fit(lr, 10, cycle_len=1)\n\n # test training results\n self.assertAlmostEqual(max(hist.history['lr']), lr)\n self.assertGreater(max(hist.history['val_acc']), 0.8)\n\n\n # test top losses\n obs = learner.top_losses(n=1, val_data=None)\n self.assertIn(obs[0][0], list(range(len(val[0]))))\n learner.view_top_losses(preproc=preproc, n=1, val_data=None)\n\n # test weight decay\n self.assertEqual(len(learner.get_weight_decay()), 2)\n self.assertEqual(learner.get_weight_decay()[0], None)\n learner.set_weight_decay(1e-4)\n self.assertAlmostEqual(learner.get_weight_decay()[0], 1e-4)\n\n # test load and save model\n learner.save_model('/tmp/test_model')\n learner.load_model('/tmp/test_model')\n\n # test validate\n cm = learner.validate()\n print(cm)\n for i, row in enumerate(cm):\n self.assertEqual(np.argmax(row), i)\n\n # test predictor\n p = ktrain.get_predictor(learner.model, preproc)\n self.assertEqual(p.predict([TEST_DOC])[0], 'soc.religion.christian')\n p.save('/tmp/test_predictor')\n p = ktrain.load_predictor('/tmp/test_predictor')\n self.assertEqual(p.predict(TEST_DOC), 'soc.religion.christian')\n self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 3)\n self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)\n\n\n def test_nbsvm(self):\n trn, val, preproc = txt.texts_from_array(x_train=self.trn[0], \n y_train=self.trn[1],\n x_test=self.val[0], \n y_test=self.val[1],\n class_names=self.classes,\n preprocess_mode='standard',\n maxlen=700, \n max_features=35000,\n ngram_range=3)\n model = txt.text_classifier('nbsvm', train_data=trn)\n learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)\n lr = 0.01\n hist = learner.fit_onecycle(lr, 10)\n\n # test training results\n self.assertAlmostEqual(max(hist.history['lr']), lr)\n self.assertGreater(max(hist.history['val_acc']), 0.92)\n 
self.assertAlmostEqual(max(hist.history['momentum']), 0.95)\n self.assertAlmostEqual(min(hist.history['momentum']), 0.85)\n\n\n # test top losses\n obs = learner.top_losses(n=1, val_data=None)\n self.assertIn(obs[0][0], list(range(len(val[0]))))\n learner.view_top_losses(preproc=preproc, n=1, val_data=None)\n\n # test weight decay\n self.assertEqual(len(learner.get_weight_decay()), 0)\n learner.set_weight_decay(1e-4)\n self.assertEqual(len(learner.get_weight_decay()), 0)\n\n # test load and save model\n learner.save_model('/tmp/test_model')\n learner.load_model('/tmp/test_model')\n\n # test validate\n cm = learner.validate()\n print(cm)\n for i, row in enumerate(cm):\n self.assertEqual(np.argmax(row), i)\n\n # test predictor\n p = ktrain.get_predictor(learner.model, preproc)\n self.assertEqual(p.predict([TEST_DOC])[0], 'soc.religion.christian')\n p.save('/tmp/test_predictor')\n p = ktrain.load_predictor('/tmp/test_predictor')\n self.assertEqual(p.predict(TEST_DOC), 'soc.religion.christian')\n self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 3)\n self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)\n\n def test_logreg(self):\n trn, val, preproc = txt.texts_from_array(x_train=self.trn[0], \n y_train=self.trn[1],\n x_test=self.val[0], \n y_test=self.val[1],\n class_names=self.classes,\n preprocess_mode='standard',\n maxlen=700, \n max_features=35000,\n ngram_range=3)\n model = txt.text_classifier('nbsvm', train_data=trn)\n learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)\n lr = 0.01\n hist = learner.autofit(lr)\n\n # test training results\n self.assertAlmostEqual(max(hist.history['lr']), lr)\n self.assertGreater(max(hist.history['val_acc']), 0.92)\n self.assertAlmostEqual(max(hist.history['momentum']), 0.95)\n self.assertAlmostEqual(min(hist.history['momentum']), 0.85)\n\n\n # test top losses\n obs = learner.top_losses(n=1, val_data=None)\n self.assertIn(obs[0][0], list(range(len(val[0]))))\n learner.view_top_losses(preproc=preproc, n=1, val_data=None)\n\n # test weight decay\n self.assertEqual(len(learner.get_weight_decay()), 0)\n learner.set_weight_decay(1e-4)\n self.assertEqual(len(learner.get_weight_decay()), 0)\n\n # test load and save model\n learner.save_model('/tmp/test_model')\n learner.load_model('/tmp/test_model')\n\n # test validate\n cm = learner.validate()\n print(cm)\n for i, row in enumerate(cm):\n self.assertEqual(np.argmax(row), i)\n\n # test predictor\n p = ktrain.get_predictor(learner.model, preproc)\n self.assertEqual(p.predict([TEST_DOC])[0], 'soc.religion.christian')\n p.save('/tmp/test_predictor')\n p = ktrain.load_predictor('/tmp/test_predictor')\n self.assertEqual(p.predict(TEST_DOC), 'soc.religion.christian')\n self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 3)\n self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)\n\n\n #@skip('temporarily disabled')\n def test_bigru(self):\n trn, val, preproc = txt.texts_from_array(x_train=self.trn[0], \n y_train=self.trn[1],\n x_test=self.val[0], \n y_test=self.val[1],\n class_names=self.classes,\n preprocess_mode='standard',\n maxlen=350, \n max_features=35000,\n ngram_range=1)\n model = txt.text_classifier('bigru', train_data=trn, preproc=preproc)\n learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=32)\n lr = 0.01\n hist = learner.autofit(lr, 1)\n\n # test training results\n self.assertAlmostEqual(max(hist.history['lr']), lr)\n self.assertGreater(max(hist.history['val_acc']), 0.9)\n 
self.assertAlmostEqual(max(hist.history['momentum']), 0.95)\n self.assertAlmostEqual(min(hist.history['momentum']), 0.85)\n\n\n # test top losses\n obs = learner.top_losses(n=1, val_data=None)\n self.assertIn(obs[0][0], list(range(len(val[0]))))\n learner.view_top_losses(preproc=preproc, n=1, val_data=None)\n\n # test weight decay\n self.assertEqual(len(learner.get_weight_decay()), 1)\n self.assertEqual(learner.get_weight_decay()[0], None)\n learner.set_weight_decay(1e-4)\n self.assertAlmostEqual(learner.get_weight_decay()[0], 1e-4)\n\n\n # test load and save model\n learner.save_model('/tmp/test_model')\n learner.load_model('/tmp/test_model')\n\n # test validate\n cm = learner.validate()\n print(cm)\n for i, row in enumerate(cm):\n self.assertEqual(np.argmax(row), i)\n\n # test predictor\n p = ktrain.get_predictor(learner.model, preproc)\n self.assertEqual(p.predict([TEST_DOC])[0], 'soc.religion.christian')\n p.save('/tmp/test_predictor')\n p = ktrain.load_predictor('/tmp/test_predictor')\n self.assertEqual(p.predict([TEST_DOC])[0], 'soc.religion.christian')\n self.assertEqual(p.predict(TEST_DOC), 'soc.religion.christian')\n self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 3)\n self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)\n\n\n #@skip('temporarily disabled')\n def test_bert(self):\n trn, val, preproc = txt.texts_from_array(x_train=self.trn[0], \n y_train=self.trn[1],\n x_test=self.val[0], \n y_test=self.val[1],\n class_names=self.classes,\n preprocess_mode='bert',\n maxlen=350, \n max_features=35000)\n model = txt.text_classifier('bert', train_data=trn)\n learner = ktrain.get_learner(model, train_data=trn, batch_size=6)\n lr = 2e-5\n hist = learner.fit_onecycle(lr, 1)\n\n # test training results\n self.assertAlmostEqual(max(hist.history['lr']), lr)\n self.assertGreater(max(hist.history['acc']), 0.7)\n\n\n # test top losses\n obs = learner.top_losses(n=1, val_data=val)\n self.assertIn(obs[0][0], list(range(len(val[0][0]))))\n learner.view_top_losses(preproc=preproc, n=1, val_data=val)\n\n # test weight decay\n self.assertEqual(len(learner.get_weight_decay()), 2)\n self.assertEqual(learner.get_weight_decay()[0], None)\n learner.set_weight_decay(1e-4)\n self.assertAlmostEqual(learner.get_weight_decay()[0], 1e-4)\n\n # test load and save model\n learner.save_model('/tmp/test_model')\n learner.load_model('/tmp/test_model')\n\n # test validate\n cm = learner.validate(val_data=val)\n print(cm)\n for i, row in enumerate(cm):\n self.assertEqual(np.argmax(row), i)\n\n # test predictor\n p = ktrain.get_predictor(learner.model, preproc)\n self.assertEqual(p.predict([TEST_DOC])[0], 'soc.religion.christian')\n p.save('/tmp/test_predictor')\n p = ktrain.load_predictor('/tmp/test_predictor')\n self.assertEqual(p.predict(TEST_DOC), 'soc.religion.christian')\n self.assertEqual(np.argmax(p.predict_proba([TEST_DOC])[0]), 3)\n self.assertEqual(type(p.explain(TEST_DOC)), IPython.core.display.HTML)\n\n\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.argmax", "sklearn.datasets.fetch_20newsgroups" ] ]
kellirestivo/neuralpredictors
[ "8e60c9ce91f83e3dcaa1b3dbe4422e1509ccbd5f" ]
[ "neuralpredictors/layers/readouts/factorized.py" ]
[ "import torch\nfrom torch import nn as nn\nimport numpy as np\n\nfrom .base import Readout\n\n\nclass FullFactorized2d(Readout):\n \"\"\"\n Factorized fully connected layer. Weights are a sum of outer products between a spatial filter and a feature vector.\n \"\"\"\n\n def __init__(\n self,\n in_shape,\n outdims,\n bias,\n normalize=True,\n init_noise=1e-3,\n constrain_pos=False,\n positive_weights=False,\n shared_features=None,\n mean_activity=None,\n spatial_and_feature_reg_weight=1.0,\n gamma_readout=None, # depricated, use feature_reg_weight instead\n **kwargs,\n ):\n\n super().__init__()\n\n c, w, h = in_shape\n self.in_shape = in_shape\n self.outdims = outdims\n self.positive_weights = positive_weights\n self.constrain_pos = constrain_pos\n self.init_noise = init_noise\n self.normalize = normalize\n self.mean_activity = mean_activity\n self.spatial_and_feature_reg_weight = self.resolve_deprecated_gamma_readout(\n spatial_and_feature_reg_weight, gamma_readout\n )\n\n self._original_features = True\n self.initialize_features(**(shared_features or {}))\n self.spatial = nn.Parameter(torch.Tensor(self.outdims, w, h))\n\n if bias:\n bias = nn.Parameter(torch.Tensor(outdims))\n self.register_parameter(\"bias\", bias)\n else:\n self.register_parameter(\"bias\", None)\n\n self.initialize(mean_activity)\n\n @property\n def shared_features(self):\n return self._features\n\n @property\n def features(self):\n if self._shared_features:\n return self.scales * self._features[self.feature_sharing_index, ...]\n else:\n return self._features\n\n @property\n def weight(self):\n if self.positive_weights:\n self.features.data.clamp_min_(0)\n n = self.outdims\n c, w, h = self.in_shape\n return self.normalized_spatial.view(n, 1, w, h) * self.features.view(n, c, 1, 1)\n\n @property\n def normalized_spatial(self):\n \"\"\"\n Normalize the spatial mask\n \"\"\"\n if self.normalize:\n norm = self.spatial.pow(2).sum(dim=1, keepdim=True)\n norm = norm.sum(dim=2, keepdim=True).sqrt().expand_as(self.spatial) + 1e-6\n weight = self.spatial / norm\n else:\n weight = self.spatial\n if self.constrain_pos:\n weight.data.clamp_min_(0)\n return weight\n\n def regularizer(self, reduction=\"sum\", average=None):\n return self.l1(reduction=reduction, average=average) * self.spatial_and_feature_reg_weight\n\n def l1(self, reduction=\"sum\", average=None):\n reduction = self.resolve_reduction_method(reduction=reduction, average=average)\n if reduction is None:\n raise ValueError(\"Reduction of None is not supported in this regularizer\")\n\n n = self.outdims\n c, w, h = self.in_shape\n ret = (\n self.normalized_spatial.view(self.outdims, -1).abs().sum(dim=1, keepdim=True)\n * self.features.view(self.outdims, -1).abs().sum(dim=1)\n ).sum()\n if reduction == \"mean\":\n ret = ret / (n * c * w * h)\n return ret\n\n def initialize(self, mean_activity=None):\n \"\"\"\n Initializes the mean, and sigma of the Gaussian readout along with the features weights\n \"\"\"\n if mean_activity is None:\n mean_activity = self.mean_activity\n self.spatial.data.normal_(0, self.init_noise)\n self._features.data.normal_(0, self.init_noise)\n if self._shared_features:\n self.scales.data.fill_(1.0)\n if self.bias is not None:\n self.initialize_bias(mean_activity=mean_activity)\n\n def initialize_features(self, match_ids=None, shared_features=None):\n \"\"\"\n The internal attribute `_original_features` in this function denotes whether this instance of the FullGuassian2d\n learns the original features (True) or if it uses a copy of the features 
from another instance of FullGaussian2d\n via the `shared_features` (False). If it uses a copy, the feature_l1 regularizer for this copy will return 0\n \"\"\"\n c, w, h = self.in_shape\n if match_ids is not None:\n assert self.outdims == len(match_ids)\n\n n_match_ids = len(np.unique(match_ids))\n if shared_features is not None:\n assert shared_features.shape == (\n n_match_ids,\n c,\n ), f\"shared features need to have shape ({n_match_ids}, {c})\"\n self._features = shared_features\n self._original_features = False\n else:\n self._features = nn.Parameter(\n torch.Tensor(n_match_ids, c)\n ) # feature weights for each channel of the core\n self.scales = nn.Parameter(torch.Tensor(self.outdims, 1)) # feature weights for each channel of the core\n _, sharing_idx = np.unique(match_ids, return_inverse=True)\n self.register_buffer(\"feature_sharing_index\", torch.from_numpy(sharing_idx))\n self._shared_features = True\n else:\n self._features = nn.Parameter(torch.Tensor(self.outdims, c)) # feature weights for each channel of the core\n self._shared_features = False\n\n def forward(self, x, shift=None):\n if shift is not None:\n raise NotImplementedError(\"shift is not implemented for this readout\")\n if self.constrain_pos:\n self.features.data.clamp_min_(0)\n\n N, c, w, h = x.size()\n c_in, w_in, h_in = self.in_shape\n if (c_in, w_in, h_in) != (c, w, h):\n raise ValueError(\"the specified feature map dimension is not the readout's expected input dimension\")\n\n y = torch.einsum(\"ncwh,owh->nco\", x, self.normalized_spatial)\n y = torch.einsum(\"nco,oc->no\", y, self.features)\n if self.bias is not None:\n y = y + self.bias\n return y\n\n def __repr__(self):\n c, w, h = self.in_shape\n r = self.__class__.__name__ + \" (\" + \"{} x {} x {}\".format(c, w, h) + \" -> \" + str(self.outdims) + \")\"\n if self.bias is not None:\n r += \" with bias\"\n if self._shared_features:\n r += \", with {} features\".format(\"original\" if self._original_features else \"shared\")\n if self.normalize:\n r += \", normalized\"\n else:\n r += \", unnormalized\"\n for ch in self.children():\n r += \" -> \" + ch.__repr__() + \"\\n\"\n return r\n\n\n# Classes for backwards compatibility\nclass SpatialXFeatureLinear(FullFactorized2d):\n pass\n\n\nclass FullSXF(FullFactorized2d):\n pass\n" ]
[ [ "torch.einsum", "torch.from_numpy", "torch.Tensor", "numpy.unique" ] ]
AsimKhan2019/OpenAI-Lab
[ "d0669d89268f2dc01c1cf878e4879775c7b6eb3c" ]
[ "rl/hyperoptimizer/random_search.py" ]
[ "import json\nimport numpy as np\nfrom rl.analytics import ideal_fitness_score\nfrom rl.hyperoptimizer.base_hyperoptimizer import HyperOptimizer\nfrom rl.util import PROBLEMS, to_json, logger\n\n\nclass RandomSearch(HyperOptimizer):\n\n '''\n Random Search by sampling on hysphere around a search path\n algo:\n 1. init x a random position in space\n 2. until termination (max_eval or fitness, e.g. solved all), do:\n 2.1 sample new pos some radius away: next_x = x + r\n 2.2 if f(next_x) > f(x) then set x = next_x\n\n Extra search memory units:\n - search_path\n - best_point\n\n save for experiment resume, search_history:\n - search_path\n - best_point\n - param_search_list\n '''\n\n # # calculate the constant radius needed to traverse unit cube\n # def cube_traversal_radius(self):\n # traversal_diameter = 1/np.power(self.max_evals,\n # 1/self.search_dim)\n # traversal_radius = traversal_diameter/2\n # return traversal_radius\n\n def decay_search_radius(self):\n '''\n start of half cube for diameter (0.25 radius) then decay\n at 100 searches, will shrink to 1/10 of initial radius 0.025\n clip to prevent going too small (0.01)\n '''\n min_radius = 0.01\n linear_decay_rate = self.next_trial_num/10./self.PARALLEL_PROCESS_NUM\n self.search_radius = np.clip(\n self.init_search_radius / linear_decay_rate,\n min_radius, self.init_search_radius)\n\n @classmethod\n def sample_hypersphere(cls, dim, r=1):\n '''Marsaglia algo for sampling uniformly on a hypersphere'''\n v = np.random.randn(dim)\n v = v * r / np.linalg.norm(v)\n return v\n\n def sample_cube(self):\n return np.random.rand(self.search_dim)\n\n def sample_r(self):\n return self.sample_hypersphere(\n self.search_dim, self.search_radius)\n\n # biject [0, 1] to [x_min, x_max]\n def biject_continuous(self, norm_val, x_min, x_max):\n return np.around(norm_val*(x_max - x_min) + x_min, self.precision)\n\n # biject [0, 1] to x_list = [a, b, c, ...] 
by binning\n def biject_discrete(self, norm_val, x_list):\n list_len = len(x_list)\n inds = np.arange(list_len)\n cont_val = self.biject_continuous(norm_val, 0, list_len)\n ind = np.digitize(cont_val, inds) - 1\n return x_list[ind]\n\n # biject one dimension: [0, 1] to a param_range val\n def biject_dim(self, norm_val, dim_spec):\n if isinstance(dim_spec, list): # discrete\n return self.biject_discrete(norm_val, dim_spec)\n else: # cont\n return self.biject_continuous(\n norm_val, dim_spec['min'], dim_spec['max'])\n\n # biject a vector on unit cube into a param in param_space\n def biject_param(self, v):\n param = {}\n for i, param_key in enumerate(self.param_range_keys):\n dim_spec = self.param_range[param_key]\n param[param_key] = self.biject_dim(v[i], dim_spec)\n return param\n\n def init_search(self):\n '''\n Initialize the random search internal variables\n '''\n self.max_evals = self.experiment_spec['param']['max_evals']\n self.num_of_trials = self.max_evals\n self.search_dim = len(self.param_range_keys)\n self.precision = 4 # decimal roundoff biject_continuous\n self.search_radius = self.init_search_radius = 0.5\n self.search_count = 0 # number of times search() has run\n self.search_exhausted = False\n self.search_path = []\n self.best_point = {\n 'trial_num': None,\n 'param': None,\n 'x': self.sample_cube(),\n 'fitness_score': float('-inf'),\n }\n problem = PROBLEMS.get(self.experiment_spec['problem'])\n self.ideal_fitness_score = ideal_fitness_score(problem)\n logger.info(\n 'ideal_fitness_score: {}'.format(self.ideal_fitness_score))\n\n self.filename = './data/{}/random_search_history.json'.format(\n self.experiment_id)\n if self.experiment_id_override is not None:\n self.load() # resume\n\n def search(self):\n '''\n algo step 2.1 sample new pos some radius away: next_x = x + r\n update search_path and param_search_list\n '''\n self.search_count += 1\n if self.next_trial_num < len(self.search_path): # resuming\n next_x = self.search_path[self.next_trial_num]\n next_param = self.param_search_list[self.next_trial_num]\n else:\n next_x = np.clip(self.best_point['x'] + self.sample_r(), 0., 1.)\n # check if too close to previously searched x\n distances = [np.linalg.norm(next_x - old_x)\n for old_x in self.search_path]\n distances = np.around(distances, self.precision)\n\n if self.search_count > (10 * self.max_evals):\n self.search_exhausted = True # exhausted search space\n next_param = self.biject_param(next_x)\n self.search_path.append(next_x)\n self.param_search_list.append(next_param)\n elif len(distances) > 0 and np.amin(distances) == 0:\n self.search()\n else:\n next_param = self.biject_param(next_x)\n self.search_path.append(next_x)\n self.param_search_list.append(next_param)\n\n def update_search(self):\n '''\n algo step 2.2 if f(next_x) > f(x) then set x = next_x\n invoked right after the latest run_trial()\n update self.best_point\n '''\n if (self.next_trial_num < self.PARALLEL_PROCESS_NUM or\n self.next_trial_num < len(self.search_path)):\n # yet to have history or still resuming from history\n return\n assert len(self.experiment_data) > 0, \\\n 'self.experiment_data must not be empty for update_search'\n\n self.decay_search_radius()\n\n x = self.search_path[-1]\n trial_data = self.experiment_data[-1]\n trial_num, param, fitness_score = self.get_fitness(trial_data)\n if fitness_score > self.best_point['fitness_score']:\n self.best_point = {\n 'trial_num': trial_num,\n 'param': param,\n 'x': x,\n 'fitness_score': fitness_score,\n }\n self.save()\n\n def 
save(self):\n search_history = {\n 'search_path': self.search_path,\n 'search_count': self.search_count,\n 'best_point': self.best_point,\n 'param_search_list': self.param_search_list,\n }\n with open(self.filename, 'w') as f:\n f.write(to_json(search_history))\n logger.info(\n 'Save search history to {}'.format(self.filename))\n return\n\n def load(self):\n try:\n search_history = json.loads(open(self.filename).read())\n self.search_path = search_history['search_path']\n self.best_point = search_history['best_point']\n self.param_search_list = search_history['param_search_list']\n logger.info('Load search history from {}'.format(self.filename))\n except (FileNotFoundError, json.JSONDecodeError):\n logger.info(\n 'Fail to load search history from {}'.format(self.filename))\n return None\n\n def satisfy_fitness(self):\n '''\n break on the first strong solution\n '''\n best_fitness_score = self.best_point['fitness_score']\n if self.next_trial_num < self.PARALLEL_PROCESS_NUM:\n return False\n elif best_fitness_score > self.ideal_fitness_score:\n logger.info(\n 'fitness_score {} > ideal_fitness_score {}, '\n 'could terminate early'.format(\n best_fitness_score, self.ideal_fitness_score))\n # return True\n # TODO fix ideal_fitness_score\n return False\n else:\n return False\n\n def to_terminate(self):\n return (self.search_exhausted or\n self.next_trial_num >= self.max_evals or\n self.satisfy_fitness())\n" ]
[ [ "numpy.clip", "numpy.amin", "numpy.around", "numpy.arange", "numpy.linalg.norm", "numpy.random.randn", "numpy.random.rand", "numpy.digitize" ] ]
DanieleGammelli/graph-meta-rl-for-amod
[ "bd6fe8cd561ab70e85d360966715a53a0a56386c" ]
[ "src/algos/gnn.py" ]
[ "\"\"\"\nA2C-GNN\n-------\nThis file contains the A2C-GNN specifications. In particular, we implement:\n(1) GNNParser\n Converts raw environment observations to agent inputs (s_t).\n(2) GNNActor:\n Policy represented by a Temporal Graph Network (Section 4.3 in the paper)\n(3) GNNCritic:\n Critic represented by a Temporal Graph Network (Section 4.3 in the paper)\n(4) A2C:\n Advantage Actor Critic algorithm using a GNN parametrization for both Actor and Critic.\n\"\"\"\n\nimport numpy as np\nimport json\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.distributions import Dirichlet\nfrom torch_geometric.data import Data\nfrom torch_geometric.nn import GCNConv\nfrom torch_geometric.nn import global_mean_pool, global_max_pool\nfrom torch_scatter import scatter_sum, scatter_mean\nfrom torch_geometric.utils import grid\nfrom collections import namedtuple\n\nSavedAction = namedtuple('SavedAction', ['log_prob', 'value'])\nargs = namedtuple('args', ('render', 'gamma', 'log_interval'))\nargs.render= True\nargs.gamma = 0.99\nargs.log_interval = 10\n\n#########################################\n############## A2C PARSER ###############\n#########################################\n\nclass GNNParser():\n \"\"\"\n Parser converting raw environment observations to agent inputs (s_t).\n \"\"\"\n def __init__(self, env, T=10, scale_factor=0.01, json_file=None):\n super().__init__()\n self.env = env\n self.T = T\n self.s = scale_factor\n self.json_file = json_file\n if self.json_file is not None:\n with open(json_file,\"r\") as file:\n self.data = json.load(file)\n \n def parse_obs(self, obs, a_t_1, r1_t_1, r2_t_1, d_t_1):\n # Takes input from the environemnt and returns a graph (with node features and connectivity)\n # Here we aggregate environment observations into node-wise features\n # In order, x is a collection of the following information:\n # 1) current availability, 2) Estimated availability, 3) one-hot representation of time-of-day\n # 4) current demand, 5) RL2 info (i.e., a_{t-1}, r_{t-1} (both matching and rebalancing), d_{t-1}\n x = torch.cat((\n torch.tensor([obs[0][n][self.env.time+1] for n in self.env.region]).view(1, 1, self.env.nregion).float(),\n torch.tensor([[(obs[0][n][self.env.time+1] + self.env.dacc[n][t]) for n in self.env.region] \\\n for t in range(self.env.time+1, self.env.time+self.T+1)]).view(1, self.T, self.env.nregion).float(),\n torch.nn.functional.one_hot(torch.tensor([self.env.time]*self.env.nregion), num_classes=self.env.tf).view(1, self.env.tf, self.env.nregion).float(),\n torch.tensor([sum([(self.env.demand[i,j][self.env.time+1]) \\\n for j in self.env.region]) for i in self.env.region]).view(1, 1, self.env.nregion).float(),\n a_t_1.view(1, 1, self.env.nregion).float(),\n r1_t_1.view(1, 1, self.env.nregion).float(),\n r2_t_1.view(1, 1, self.env.nregion).float(),\n d_t_1.view(1, 1, self.env.nregion).float()),\n dim=1).squeeze(0).view(36, self.env.nregion).T\n if self.json_file is not None:\n edge_index = torch.vstack((torch.tensor([edge['i'] for edge in self.data[\"topology_graph\"]]).view(1,-1), \n torch.tensor([edge['j'] for edge in self.data[\"topology_graph\"]]).view(1,-1))).long()\n else:\n edge_index = torch.cat((torch.arange(self.env.nregion).view(1, self.env.nregion), \n torch.arange(self.env.nregion).view(1, self.env.nregion)), dim=0).long()\n data = Data(x, edge_index)\n return data\n\n#########################################\n############## A2C ACTOR ################\n#########################################\n\nclass 
Actor(torch.nn.Module):\n \"\"\"\n Actor \\pi(a_t | s_t) parametrizing the concentration parameters of a Dirichlet Policy.\n \"\"\"\n def __init__(self, input_size=4, hidden_dim=256, output_dim=1):\n super(Actor, self).__init__()\n self.hidden_dim = hidden_dim\n \n self.lin1 = nn.Linear(input_size, hidden_dim)\n self.gru = nn.GRUCell(hidden_dim, hidden_dim)\n self.h_to_g = nn.Linear(hidden_dim, hidden_dim)\n self.g_to_a = nn.Linear(input_size + hidden_dim, output_dim)\n\n def forward(self, x, edge_index, h):\n # x: [N, F_x], where N is the number of nodes.\n # edge_index: [2, E] with max entry N - 1.\n # h: GRU hidden state\n row, col = edge_index\n x_temp = torch.cat([x[row]])\n h_temp = h\n\n x_temp = F.relu(self.lin1(x_temp))\n x_temp = scatter_sum(x_temp, col, dim=0, dim_size=x.size(0))\n\n h = self.gru(x_temp, h_temp)\n\n x_temp = F.relu(self.h_to_g(h))\n x_temp = torch.cat([x, x_temp], dim=1)\n\n a = F.softplus(self.g_to_a(x_temp))\n return a, h\n\n#########################################\n############## A2C CRITIC ###############\n#########################################\n\nclass Critic(torch.nn.Module):\n \"\"\"\n Critic parametrizing the value function estimator V(s_t).\n \"\"\"\n def __init__(self, input_size=4, hidden_dim=256, output_dim=1):\n super(Critic, self).__init__()\n self.hidden_dim = hidden_dim\n \n self.lin1 = nn.Linear(input_size, hidden_dim)\n self.gru = nn.GRUCell(hidden_dim, hidden_dim)\n self.h_to_g = nn.Linear(hidden_dim, hidden_dim)\n self.g_to_a = nn.Linear(input_size + hidden_dim, output_dim)\n\n def forward(self, x, edge_index, h):\n # x: [N, F_x], where N is the number of nodes.\n # edge_index: [2, E] with max entry N - 1.\n # h: GRU hidden state\n row, col = edge_index\n x_temp = torch.cat([x[row]])\n h_temp = h\n\n x_temp = F.relu(self.lin1(x_temp))\n x_temp = scatter_sum(x_temp, col, dim=0, dim_size=x.size(0))\n\n h = self.gru(x_temp, h_temp)\n\n x_temp = F.relu(self.h_to_g(h))\n\n x_temp = torch.cat([x, x_temp], dim=1)\n x_temp = torch.sum(x_temp, dim=0)\n\n v = self.g_to_a(x_temp)\n return v, h\n\n#########################################\n############## A2C AGENT ################\n#########################################\n\nclass A2C(nn.Module):\n \"\"\"\n Advantage Actor Critic algorithm for the AMoD control problem. 
\n \"\"\"\n def __init__(self, env, input_size, hidden_size=128, eps=np.finfo(np.float32).eps.item(), device=torch.device(\"cpu\"),\n clip=50, env_baseline=None):\n super(A2C, self).__init__()\n self.env = env\n self.eps = eps\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.device = device\n self.clip = clip if clip is not None else -1\n \n self.actor = Actor(self.input_size, self.hidden_size, 1)\n self.critic = Critic(self.input_size, self.hidden_size, 1)\n \n self.optimizers = self.configure_optimizers()\n \n # action & reward buffer\n self.saved_actions = []\n self.rewards = []\n self.env_baseline = env_baseline\n self.to(self.device)\n \n def forward(self, obs, h_a, h_c, jitter=1e-20):\n \"\"\"\n forward of both actor and critic\n \"\"\"\n # parse raw environment data in model format\n graph = obs.to(self.device)\n \n # actor: computes concentration parameters of a X distribution\n a_probs, h_a = self.actor(graph.x, graph.edge_index, h_a)\n\n # critic: estimates V(s_t)\n value, h_c = self.critic(graph.x, graph.edge_index, h_c)\n return a_probs + jitter, value, h_a, h_c\n \n def select_action(self, obs, h_a, h_c):\n a_probs, value, h_a, h_c = self.forward(obs, h_a, h_c)\n \n m = Dirichlet(concentration=a_probs.view(-1,))\n \n action = m.sample()\n self.saved_actions.append(SavedAction(m.log_prob(action), value))\n return action, h_a, h_c\n\n def training_step(self, city):\n R = 0\n saved_actions = self.saved_actions\n policy_losses = [] # list to save actor (policy) loss\n value_losses = [] # list to save critic (value) loss\n returns = [] # list to save the true values\n\n # calculate the true value using rewards returned from the environment\n for r in self.rewards[::-1]:\n # calculate the discounted value\n r = float((r - np.mean(self.env_baseline[city])) / (np.std(self.env_baseline[city]) + self.eps)) # env-dependent baseline to standardinze rewards\n R = r + args.gamma * R\n returns.insert(0, R)\n\n returns = torch.tensor(returns).float()\n \n for (log_prob, value), R in zip(saved_actions, returns):\n advantage = R - value.item()\n\n # calculate actor (policy) loss \n policy_losses.append(-log_prob * advantage)\n\n # calculate critic (value) loss using L1 smooth loss\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(self.device)))\n\n # take gradient steps\n self.optimizers['a_optimizer'].zero_grad()\n a_loss = torch.stack(policy_losses).sum()\n a_loss.backward()\n if self.clip >= 0:\n a_grad_norm = torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.clip)\n self.optimizers['a_optimizer'].step()\n \n self.optimizers['c_optimizer'].zero_grad()\n v_loss = torch.stack(value_losses).sum()\n v_loss.backward()\n if self.clip >= 0:\n v_grad_norm = torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.clip)\n self.optimizers['c_optimizer'].step()\n \n # reset rewards and action buffer\n del self.rewards[:]\n del self.saved_actions[:]\n if self.clip >= 0:\n return {\"a_grad_norm\": a_grad_norm, \"v_grad_norm\": a_grad_norm}\n \n def configure_optimizers(self):\n optimizers = dict()\n actor_params = list(self.actor.parameters())\n critic_params = list(self.critic.parameters())\n optimizers['a_optimizer'] = torch.optim.Adam(actor_params, lr=3e-4)\n optimizers['c_optimizer'] = torch.optim.Adam(critic_params, lr=3e-4)\n return optimizers\n \n def save_checkpoint(self, path='ckpt.pth'):\n checkpoint = dict()\n checkpoint['model'] = self.state_dict()\n for key, value in self.optimizers.items():\n checkpoint[key] = value.state_dict()\n 
torch.save(checkpoint, path)\n \n def load_checkpoint(self, path='ckpt.pth'):\n checkpoint = torch.load(path)\n self.load_state_dict(checkpoint['model'])\n for key, value in self.optimizers.items():\n self.optimizers[key].load_state_dict(checkpoint[key])\n \n def log(self, log_dict, path='log.pth'):\n torch.save(log_dict, path)" ]
[ [ "torch.optim.Adam", "torch.cat", "torch.load", "torch.sum", "torch.stack", "torch.tensor", "numpy.finfo", "torch.nn.Linear", "numpy.std", "numpy.mean", "torch.arange", "torch.device", "torch.nn.GRUCell", "torch.save" ] ]
mittelmax/sptrans_Data
[ "a1ccb0d9566c9c9f9b30951bdc8430c930007311" ]
[ "downloader.py" ]
[ "from bs4 import BeautifulSoup\nimport lxml\nimport requests\nimport pandas as pd\nimport urllib\nimport ssl\nfrom os import listdir\n\n# data url's\nurl_2020 = requests.get(\"https://www.prefeitura.sp.gov.br/cidade/secretarias/transportes/institucional/sptrans/acesso_a_informacao/index.php?p=292723\").text\nurl_2019 = requests.get(\"https://www.prefeitura.sp.gov.br/cidade/secretarias/transportes/institucional/sptrans/acesso_a_informacao/index.php?p=269652\").text\nurl_2018 = requests.get(\"https://www.prefeitura.sp.gov.br/cidade/secretarias/transportes/institucional/sptrans/acesso_a_informacao/index.php?p=247850\").text\nurl_2017 = requests.get(\"https://www.prefeitura.sp.gov.br/cidade/secretarias/transportes/institucional/sptrans/acesso_a_informacao/index.php?p=228269\").text\nurl_2016 = requests.get(\"https://www.prefeitura.sp.gov.br/cidade/secretarias/transportes/institucional/sptrans/acesso_a_informacao/index.php?p=209427\").text\nurl_2015 = requests.get(\"https://www.prefeitura.sp.gov.br/cidade/secretarias/transportes/institucional/sptrans/acesso_a_informacao/index.php?p=188767\").text\n\n\n# 2020\nsoup_2020 = BeautifulSoup(url_2020, \"lxml\")\nlinks_2020 = [i[\"href\"] for i in soup_2020.findAll(\"a\", href=True) if i[\"href\"].endswith(\".xls\")\n and \"Consolidad\" not in i[\"href\"] and \"Pass_Transp\" not in i[\"href\"]]\n\n# 2019\nsoup_2019 = BeautifulSoup(url_2019, \"lxml\")\nlinks_2019 = [i[\"href\"] for i in soup_2019.findAll(\"a\", href=True) if i[\"href\"].endswith(\".xls\")\n and \"Consolidad\" not in i[\"href\"] and \"Pass_Transp\" not in i[\"href\"]]\n\n# 2018\nsoup_2018 = BeautifulSoup(url_2018, \"lxml\")\nlinks_2018 = [i[\"href\"] for i in soup_2018.findAll(\"a\", href=True) if i[\"href\"].endswith(\".xls\")\n and \"Consolidad\" not in i[\"href\"] and \"Pass_Transp\" not in i[\"href\"]]\n\n# 2017\nsoup_2017 = BeautifulSoup(url_2017, \"lxml\")\nlinks_2017 = [i[\"href\"] for i in soup_2017.findAll(\"a\", href=True) if i[\"href\"].endswith(\".xls\")\n and \"Consolidad\" not in i[\"href\"] and \"Pass_Transp\" not in i[\"href\"]]\n\n# 2016\nsoup_2016 = BeautifulSoup(url_2016, \"lxml\")\nlinks_2016 = [i[\"href\"] for i in soup_2016.findAll(\"a\", href=True) if i[\"href\"].endswith(\".xls\")\n and \"Consolidad\" not in i[\"href\"] and \"Pass_Transp\" not in i[\"href\"]]\n\n# 2015\nsoup_2015 = BeautifulSoup(url_2015, \"lxml\")\nlinks_2015 = [i[\"href\"] for i in soup_2015.findAll(\"a\", href=True) if i[\"href\"].endswith(\".xls\")\n and \"Consolidad\" not in i[\"href\"] and \"Pass_Transp\" not in i[\"href\"]]\n\nall_links = links_2020 + links_2019 + links_2018 + links_2017 + links_2016 + links_2015\n\n# # Disabling ssl verification\n# try:\n# _create_unverified_https_context = ssl._create_unverified_context\n# except AttributeError:\n# pass\n# else:\n# ssl._create_default_https_context = _create_unverified_https_context\n\n# Removing spaces and latin-1 characters in url's\nfor i in range(len(all_links)):\n all_links[i] = all_links[i].replace(\" \", \"%20\")\n all_links[i] = all_links[i].replace(\"ê\", \"%C3%AA\")\n\n# Scraping data\nmissing_files = []\n\nfor i in range(len(all_links)):\n try:\n file_i, header_i = urllib.request.urlretrieve(all_links[i], \"Data/file_{}.xls\".format(i+1))\n print(\"file {} succesfully downloaded\".format(i+1))\n except:\n print(\"There was a problem with file {}\".format(i+1))\n missing_files.append(\"file_{}\".format(i+1))\n\nprint(missing_files)\n\n# Since only 2 days are missing (file 1613 and file 1265) we can make them equal to the 
previous day\ndf_1613 = pd.read_excel(\"Data/file_1612.xls\")\ndf_1613[\"DATA\"].replace({pd.to_datetime(\"2016-10-20 00:00:00\"):pd.to_datetime(\"2016-10-21 00:00:00\")}, inplace=True)\n\ndf_1265 = pd.read_excel(\"Data/file_1264.xls\")\ndf_1265[\"Data\"].replace({pd.to_datetime(\"2017-11-07 00:00:00\"):pd.to_datetime(\"2017-11-08 00:00:00\")}, inplace=True)\n\ndf_1613.to_excel(\"Data/file_1613.xls\")\ndf_1265.to_excel(\"Data/file_1265.xls\")\n" ]
[ [ "pandas.read_excel", "pandas.to_datetime" ] ]
Teagum/chainsaddiction
[ "b57995e60361134cfd4f6f748a92457e8ef14c81" ]
[ "tests/utils.py" ]
[ "\"\"\"\nChaisAddiction testing utilities\n\"\"\"\nfrom pathlib import Path\nfrom typing import Union\n\nimport numpy as np\n\n\ndef gen_init_params(m_states: int, data: np.ndarray) -> tuple:\n \"\"\"\n Generate initila parameters for HMM training.\n \"\"\"\n init_lambda = gen_sdm(data, m_states)\n init_gamma = gen_prob_mat(m_states, m_states)\n init_delta = gen_prob_mat(1, m_states)\n return init_lambda, init_gamma, init_delta\n\n\ndef load_data(path: Union[str, Path], file: Union[str, Path],\n dtype: str = 'float128'):\n \"\"\"\n Load test data from file.\n \"\"\"\n path = Path(path)\n dfp = path.joinpath(file)\n return np.fromfile(str(dfp), dtype=dtype, sep='\\n')\n\n\ndef gen_sdm(data: np.ndarray, m_states: int) -> np.ndarray:\n \"\"\"\n Generate an initial guess of the state-dependend means.\n \"\"\"\n return np.linspace(data.min(), data.max(), m_states)\n\n\ndef gen_prob_mat(rows: int, cols: int) -> np.ndarray:\n \"\"\"\n Generate a random probability matrix.\n \"\"\"\n var = np.exp(np.random.rand(rows, cols))\n return var / var.sum(axis=1, keepdims=True)\n" ]
[ [ "numpy.random.rand" ] ]
bwspenc/neml
[ "2fe283cb14ab309d95627590408670dab877c5a5" ]
[ "test/test_elasticity.py" ]
[ "import sys\nsys.path.append('..')\n\nfrom neml import elasticity, interpolate\nimport unittest\n\nfrom common import *\n\nimport numpy as np\nimport numpy.linalg as la\n\nclass CommonElasticity(object):\n \"\"\"\n Tests that could apply to any elastic model.\n \"\"\"\n def test_C2S(self):\n C = self.model.C(self.T)\n self.assertTrue(np.allclose(self.model.S(self.T), la.inv(C)))\n\n def test_S2C(self):\n S = self.model.S(self.T)\n self.assertTrue(np.allclose(self.model.C(self.T), la.inv(S)))\n\nclass TestIsotropicConstantModel(CommonElasticity, unittest.TestCase):\n def setUp(self):\n self.mu = 29000.0\n self.K = 64000.0\n self.T = 325.0\n\n self.model = elasticity.IsotropicLinearElasticModel(self.mu, \n \"shear\", self.K, \"bulk\")\n\n def test_modulii(self):\n S = self.model.S(self.T)\n E = 9*self.K*self.mu/(3*self.K + self.mu)\n nu = (3*self.K - 2*self.mu)/(2*(3*self.K+self.mu))\n\n self.assertTrue(np.isclose(S[0,0], 1/E))\n self.assertTrue(np.isclose(S[0,1], -nu/E))\n self.assertTrue(np.isclose(S[3,3], (1+nu)/E))\n\n\nclass TestEquivalentDefinitions(unittest.TestCase):\n def setUp(self):\n self.E = 100000.0\n self.nu = 0.3\n\n self.mu = self.E / (2 * (1 + self.nu))\n self.K = self.E / (3 * (1 - 2.0 * self.nu))\n \n self.model_Ev = elasticity.IsotropicLinearElasticModel(\n self.E, \"youngs\", self.nu, \"poissons\")\n self.model_GK = elasticity.IsotropicLinearElasticModel(\n self.mu, \"shear\", self.K, \"bulk\")\n\n self.T = 300.0\n\n def test_equivalent_C(self):\n C1 = self.model_Ev.C(self.T)\n C2 = self.model_GK.C(self.T)\n self.assertTrue(np.allclose(C1,C2))\n\n def test_equivalent_S(self):\n S1 = self.model_Ev.S(self.T)\n S2 = self.model_GK.S(self.T)\n self.assertTrue(np.allclose(S1,S2))\n \n def test_equivalent_modulii(self):\n self.assertTrue(np.isclose(self.E, self.model_Ev.E(self.T)))\n self.assertTrue(np.isclose(self.E, self.model_GK.E(self.T)))\n self.assertTrue(np.isclose(self.nu, self.model_Ev.nu(self.T)))\n self.assertTrue(np.isclose(self.nu, self.model_GK.nu(self.T)))\n self.assertTrue(np.isclose(self.mu, self.model_Ev.G(self.T)))\n self.assertTrue(np.isclose(self.mu, self.model_GK.G(self.T)))\n self.assertTrue(np.isclose(self.K, self.model_Ev.K(self.T)))\n self.assertTrue(np.isclose(self.K, self.model_GK.K(self.T)))\n" ]
[ [ "numpy.linalg.inv", "numpy.allclose", "numpy.isclose" ] ]
NasserMo/ulmo
[ "46c835100bb76f44f46c752f8cadb3cbe84652b1" ]
[ "ulmo/usgs/nwis/hdf5.py" ]
[ "import contextlib\nimport copy\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport warnings\n\nimport numpy as np\nimport pandas\nimport tables\nfrom tables.scripts import ptrepack\n\nfrom ulmo import util\nfrom ulmo.usgs.nwis import core\n\n\n# default hdf5 file path\nDEFAULT_HDF5_FILE_PATH = util.get_default_h5file_path('usgs/')\n\n# define column sizes for strings stored in hdf5 tables\n# note: this is currently not used as we simply read in and write out entire\n# site dataframes to the store (not using tables type)\nSITES_MIN_ITEMSIZE = {\n 'agency': 20,\n 'code': 20,\n 'county': 30,\n 'huc': 20,\n 'name': 250,\n 'network': 20,\n 'site_type': 20,\n 'state_code': 2,\n 'srs': 20,\n 'default_tz_abbreviation': 5,\n 'default_tz_offset': 7,\n 'dst_tz_abbreviation': 5,\n 'dst_tz_offset': 7,\n}\n\nSITES_TABLE = 'sites'\n\n\ndef get_sites(path=None, complevel=None, complib=None):\n \"\"\"Fetches previously-cached site information from an hdf5 file.\n\n Parameters\n ----------\n path : ``None`` or file path\n Path to the hdf5 file to be queried, if ``None`` then the default path\n will be used. If a file path is a directory, then multiple hdf5 files\n will be kept so that file sizes remain small for faster repacking.\n complevel : ``None`` or int {0-9}\n Open hdf5 file with this level of compression. If ``None` (default),\n then a maximum compression level will be used if a compression library\n can be found. If set to 0 then no compression will be used regardless of\n what complib is.\n complib : ``None`` or str {'zlib', 'bzip2', 'lzo', 'blosc'}\n Open hdf5 file with this type of compression. If ``None`` (default) then\n the best available compression library available on your system will be\n selected. If complevel argument is set to 0 then no compression will be\n used.\n\n\n Returns\n -------\n sites_dict : dict\n a python dict with site codes mapped to site information\n \"\"\"\n sites_store_path = _get_store_path(path, 'sites.h5')\n comp_kwargs = _compression_kwargs(complevel=complevel, complib=complib)\n\n if not os.path.exists(sites_store_path):\n return {}\n\n with _get_store(sites_store_path, mode='r', **comp_kwargs) as store:\n if SITES_TABLE not in store:\n return {}\n\n sites_df = store[SITES_TABLE]\n sites_dict = _sites_df_to_dict(sites_df)\n return sites_dict\n\n\ndef get_site(site_code, path=None, complevel=None, complib=None):\n \"\"\"Fetches previously-cached site information from an hdf5 file.\n\n Parameters\n ----------\n site_code : str\n The site code of the site you want to get information for.\n path : ``None`` or file path\n Path to the hdf5 file to be queried, if ``None`` then the default path\n will be used. If a file path is a directory, then multiple hdf5 files\n will be kept so that file sizes remain small for faster repacking.\n complevel : ``None`` or int {0-9}\n Open hdf5 file with this level of compression. If ``None` (default),\n then a maximum compression level will be used if a compression library\n can be found. If set to 0 then no compression will be used regardless of\n what complib is.\n complib : ``None`` or str {'zlib', 'bzip2', 'lzo', 'blosc'}\n Open hdf5 file with this type of compression. If ``None`` (default) then\n the best available compression library available on your system will be\n selected. 
If complevel argument is set to 0 then no compression will be\n used.\n\n\n Returns\n -------\n site_dict : dict\n a python dict containing site information\n \"\"\"\n sites_store_path = _get_store_path(path, 'sites.h5')\n\n # XXX: this could be more efficiently implemented by querying the sites\n # table with actual expressions\n sites = get_sites(path=sites_store_path, complevel=complevel, complib=complib)\n try:\n return sites[site_code]\n except KeyError:\n raise LookupError(\"could not find site: %s\" % site_code)\n\n\ndef get_site_data(site_code, agency_code=None, parameter_code=None, path=None,\n complevel=None, complib=None):\n \"\"\"Fetches previously-cached site data from an hdf5 file.\n\n Parameters\n ----------\n site_code : str\n The site code of the site you want to get data for.\n agency_code : ``None`` or str\n The agency code to get data for. This will need to be set if a site code\n is in use by multiple agencies (this is rare).\n parameter_code : `None`, str, or list\n List of parameters to read. If ``None`` (default) read all parameters.\n Otherwise only read specified parameters. Parameters should be specified\n with statistic code, i.e. daily streamflow is '00060:00003'\n path : ``None`` or file path\n Path to the hdf5 file to be queried, if ``None`` then the default path\n will be used. If a file path is a directory, then multiple hdf5 files\n will be kept so that file sizes remain small for faster repacking.\n complevel : ``None`` or int {0-9}\n Open hdf5 file with this level of compression. If ``None` (default),\n then a maximum compression level will be used if a compression library\n can be found. If set to 0 then no compression will be used regardless of\n what complib is.\n complib : ``None`` or str {'zlib', 'bzip2', 'lzo', 'blosc'}\n Open hdf5 file with this type of compression. If ``None`` (default) then\n the best available compression library available on your system will be\n selected. If complevel argument is set to 0 then no compression will be\n used.\n\n\n Returns\n -------\n data_dict : dict\n a python dict with parameter codes mapped to value dicts\n \"\"\"\n site_data_path = _get_store_path(path, site_code + '.h5')\n\n comp_kwargs = _compression_kwargs(complevel=complevel, complib=complib)\n\n with _get_store(site_data_path, mode='r', **comp_kwargs) as store:\n site_group = store.get_node(site_code)\n if site_group is None:\n return {}\n\n if parameter_code:\n site_data = dict([\n (variable_group._v_pathname.rsplit('/', 1)[-1],\n _variable_group_to_dict(store, variable_group))\n for variable_group in site_group\n if variable_group._v_pathname.rsplit('/', 1)[-1] in parameter_code\n ])\n else:\n site_data = dict([\n (variable_group._v_pathname.rsplit('/', 1)[-1],\n _variable_group_to_dict(store, variable_group))\n for variable_group in site_group\n ])\n return site_data\n\n\ndef repack(path, complevel=None, complib=None):\n \"\"\"Repack the hdf5 file at path. This is the same as running the pytables\n ptrepack command on the file.\n\n Parameters\n ----------\n path : file path\n Path to the hdf5 file.\n complevel : ``None`` or int {0-9}\n Open hdf5 file with this level of compression. If ``None` (default),\n then a maximum compression level will be used if a compression library\n can be found. If set to 0 then no compression will be used regardless of\n what complib is.\n complib : ``None`` or str {'zlib', 'bzip2', 'lzo', 'blosc'}\n Open hdf5 file with this type of compression. 
If ``None`` (default) then\n the best available compression library available on your system will be\n selected. If complevel argument is set to 0 then no compression will be\n used.\n\n Returns\n -------\n None : ``None``\n \"\"\"\n comp_kwargs = _compression_kwargs(complevel=complevel, complib=complib)\n\n with tempfile.NamedTemporaryFile() as temp_f:\n temp_path = temp_f.name\n _ptrepack(path, temp_path, **comp_kwargs)\n shutil.copyfile(temp_path, path)\n\n\ndef update_site_list(sites=None, state_code=None, service=None, path=None,\n input_file=None, complevel=None, complib=None, autorepack=True):\n \"\"\"Update cached site information.\n\n Parameters\n ----------\n sites : str, iterable of strings or ``None``\n The site to use or list of sites to use; lists will be joined by a ','.\n state_code : str or ``None``\n Two-letter state code used in stateCd parameter.\n site_type : str or ``None``\n Type of site used in siteType parameter.\n service : {``None``, 'instantaneous', 'iv', 'daily', 'dv'}\n The service to use, either \"instantaneous\", \"daily\", or ``None``\n (default). If set to ``None``, then both services are used. The\n abbreviations \"iv\" and \"dv\" can be used for \"instantaneous\" and \"daily\",\n respectively.\n path : ``None`` or file path\n Path to the hdf5 file to be queried, if ``None`` then the default path\n will be used. If a file path is a directory, then multiple hdf5 files\n will be kept so that file sizes remain small for faster repacking.\n input_file: ``None``, file path or file object\n If ``None`` (default), then the NWIS web services will be queried, but\n if a file is passed then this file will be used instead of requesting\n data from the NWIS web services.\n complevel : ``None`` or int {0-9}\n Open hdf5 file with this level of compression. If ``None` (default),\n then a maximum compression level will be used if a compression library\n can be found. If set to 0 then no compression will be used regardless of\n what complib is.\n complib : ``None`` or str {'zlib', 'bzip2', 'lzo', 'blosc'}\n Open hdf5 file with this type of compression. If ``None`` (default) then\n the best available compression library available on your system will be\n selected. If complevel argument is set to 0 then no compression will be\n used.\n autorepack : bool\n Whether or not to automatically repack the h5 file after updating.\n There is a tradeoff between performance and disk space here: large files\n take a longer time to repack but also tend to grow larger faster, the\n default of True conserves disk space because untamed file growth can\n become quite destructive. 
If you set this to False, you can manually\n repack files with repack().\n\n Returns\n -------\n None : ``None``\n \"\"\"\n sites_store_path = _get_store_path(path, 'sites.h5')\n\n new_sites = core.get_sites(sites=sites, state_code=state_code, service=service,\n input_file=input_file)\n\n if len(new_sites) == 0:\n return\n\n comp_kwargs = _compression_kwargs(complevel=complevel, complib=complib)\n with _get_store(sites_store_path, mode='a', **comp_kwargs) as store:\n _update_stored_sites(store, new_sites)\n\n if autorepack:\n repack(sites_store_path, complevel=complevel, complib=complib)\n\n\ndef update_site_data(site_code, start=None, end=None, period=None, path=None,\n input_file=None, complevel=None, complib=None, autorepack=True):\n \"\"\"Update cached site data.\n\n Parameters\n ----------\n site_code : str\n The site code of the site you want to query data for.\n start : ``None`` or datetime (see :ref:`dates-and-times`)\n Start of a date range for a query. This parameter is mutually exclusive\n with period (you cannot use both).\n end : ``None`` or datetime (see :ref:`dates-and-times`)\n End of a date range for a query. This parameter is mutually exclusive\n with period (you cannot use both).\n period : {``None``, str, datetime.timedelta}\n Period of time to use for requesting data. This will be passed along as\n the period parameter. This can either be 'all' to signal that you'd like\n the entire period of record, or string in ISO 8601 period format (e.g.\n 'P1Y2M21D' for a period of one year, two months and 21 days) or it can\n be a datetime.timedelta object representing the period of time. This\n parameter is mutually exclusive with start/end dates.\n path : ``None`` or file path\n Path to the hdf5 file to be queried, if ``None`` then the default path\n will be used. If a file path is a directory, then multiple hdf5 files\n will be kept so that file sizes remain small for faster repacking.\n input_file: ``None``, file path or file object\n If ``None`` (default), then the NWIS web services will be queried, but\n if a file is passed then this file will be used instead of requesting\n data from the NWIS web services.\n autorepack : bool\n Whether or not to automatically repack the h5 file(s) after updating.\n There is a tradeoff between performance and disk space here: large files\n take a longer time to repack but also tend to grow larger faster, the\n default of True conserves disk space because untamed file growth can\n become quite destructive. 
If you set this to False, you can manually\n repack files with repack().\n\n\n Returns\n -------\n None : ``None``\n \"\"\"\n site_data_path = _get_store_path(path, site_code + '.h5')\n\n if input_file is None and start is None and end is None and period is None:\n prior_last_refresh = _get_last_refresh(site_code, site_data_path)\n if prior_last_refresh is None:\n period = 'all'\n else:\n start = prior_last_refresh\n\n new_site_data = core.get_site_data(site_code, start=start, end=end,\n period=period, input_file=input_file)\n\n comp_kwargs = _compression_kwargs(complevel=complevel, complib=complib)\n\n something_changed = False\n with _get_store(site_data_path, mode='a', **comp_kwargs) as store:\n for variable_code, data_dict in new_site_data.iteritems():\n variable_group_path = site_code + '/' + variable_code\n\n site_dict = data_dict.pop('site')\n\n values_path = variable_group_path + '/values'\n new_values = _values_dicts_to_df(data_dict.pop('values', {}))\n\n last_refresh = data_dict.get('last_refresh')\n if last_refresh is None:\n last_refresh = np.nan\n new_values['last_checked'] = last_refresh\n if values_path in store:\n if len(new_values) == 0:\n continue\n\n compare_cols = ['value', 'qualifiers']\n original_values = store[values_path]\n original_align, new_align = original_values.align(new_values)\n new_nulls = pandas.isnull(new_align[compare_cols]).sum(axis=1).astype(bool)\n modified_mask = ~new_nulls & ((\n original_align[compare_cols] == new_align[compare_cols]) \\\n .sum(axis=1) < len(compare_cols))\n\n combined = new_values.combine_first(original_values)\n combined['last_modified'][modified_mask] = last_refresh\n new_values = combined\n else:\n new_values['last_modified'] = last_refresh\n\n store[values_path] = new_values\n something_changed = True\n\n variable_group = store.get_node(variable_group_path)\n for key, value in data_dict.iteritems():\n setattr(variable_group._v_attrs, key, value)\n\n site_group = store.get_node(site_code)\n site_group._v_attrs.last_refresh = last_refresh\n\n if len(site_dict):\n sites_store_path = _get_store_path(path, 'sites.h5')\n with _get_store(sites_store_path, mode='a', **comp_kwargs) as store:\n _update_stored_sites(store, {site_dict['code']: site_dict})\n\n if autorepack:\n if something_changed:\n repack(site_data_path, complevel=complevel, complib=complib)\n if site_data_path != sites_store_path:\n repack(sites_store_path, complevel=complevel, complib=complib)\n\n\ndef _compression_kwargs(complevel=None, complib=None):\n \"\"\"returns a dict containing the compression settings to use\"\"\"\n if complib is None and complevel is None:\n possible_compressions = ('blosc', 'zlib')\n for possible_compression in possible_compressions:\n try:\n try_kwargs = dict(complevel=9, complib=possible_compression)\n tables.Filters(**try_kwargs)\n return try_kwargs\n except tables.FiltersWarning:\n pass\n\n complevel = 0\n\n elif complib is not None and complevel is None:\n complevel = 9\n\n return dict(complevel=complevel, complib=complib)\n\n\[email protected]\ndef _filter_warnings():\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", tables.NaturalNameWarning)\n yield\n\n\ndef _get_last_refresh(site_code, path, complevel=None, complib=None):\n comp_kwargs = _compression_kwargs(complevel=complevel, complib=complib)\n try:\n with _get_store(path, mode='r', **comp_kwargs) as store:\n site_group = store.get_node(site_code)\n if site_group is None:\n return None\n last_refresh = getattr(site_group._v_attrs, 'last_refresh')\n if 
pandas.isnull(last_refresh):\n last_refresh = None\n return last_refresh\n except IOError:\n return None\n\n\[email protected]\ndef _get_store(path, **kwargs):\n abs_path = os.path.abspath(path)\n dir_path = os.path.dirname(abs_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n with pandas.io.pytables.get_store(path, **kwargs) as store:\n with _filter_warnings():\n yield store\n\n\ndef _get_store_path(path, default_file_name):\n if path is None:\n path = DEFAULT_HDF5_FILE_PATH\n if isinstance(path, basestring) and (path.endswith('/') or\n path.endswith('\\\\')):\n return os.path.join(path, default_file_name)\n else:\n return path\n\n\ndef _nans_to_none(df):\n return df.astype(object).where(pandas.notnull(df), None)\n\n\ndef _nest_dataframe_dicts(unnested_df, nested_column, keys):\n df = unnested_df.copy()\n df = _nans_to_none(df)\n\n def _nest_func(row):\n return dict(zip(keys, row))\n\n nested_values = map(_nest_func, df[keys].values)\n df[nested_column] = nested_values\n\n for key in keys:\n del df[key]\n\n return df\n\n\ndef _ptrepack(src, dst, complevel, complib):\n \"\"\"run ptrepack to repack from src to dst\"\"\"\n with _sysargs_hacks():\n sys.argv = ['', '--complevel=%s' % complevel, '--complib=%s' % complib, src, dst]\n with _filter_warnings():\n ptrepack.main()\n\n\ndef _sites_df_to_dict(df):\n df = _nest_dataframe_dicts(df, 'location', ['latitude', 'longitude', 'srs'])\n for tz_type in ['default_tz', 'dst_tz']:\n tz_keys = ['abbreviation', 'offset']\n rename_dict = dict([\n (tz_type + '_' + key, key) for key in tz_keys])\n df = df.rename(columns=rename_dict)\n df = _nest_dataframe_dicts(df, tz_type,\n tz_keys)\n df = _nest_dataframe_dicts(df, 'timezone_info',\n ['uses_dst', 'default_tz', 'dst_tz'])\n\n return df.T.to_dict()\n\n\ndef _sites_dict_to_df(sites_dict):\n df = pandas.DataFrame(sites_dict).T.copy()\n df = _unnest_dataframe_dicts(df, 'location', ['latitude', 'longitude', 'srs'])\n df = _unnest_dataframe_dicts(df, 'timezone_info',\n ['uses_dst', 'default_tz', 'dst_tz'])\n for tz_type in ['default_tz', 'dst_tz']:\n tz_keys = ['abbreviation', 'offset']\n df = _unnest_dataframe_dicts(df, tz_type,\n tz_keys)\n rename_dict = dict([\n (key, tz_type + '_' + key) for key in tz_keys])\n df = df.rename(columns=rename_dict)\n\n return df\n\n\[email protected]\ndef _sysargs_hacks():\n \"\"\"temporarily replace sys.argv without leaking global state\"\"\"\n orig_sysargv = copy.copy(sys.argv)\n yield\n sys.argv = orig_sysargv\n\n\ndef _unnest_dataframe_dicts(df, nested_column, keys):\n def _unnest_func(nested_dict):\n if pandas.isnull(nested_dict):\n return [np.nan] * len(keys)\n return [nested_dict.get(key, np.nan) for key in keys]\n\n unzipped_values = zip(*df[nested_column].map(_unnest_func).values)\n\n for key, values in zip(keys, unzipped_values):\n df[key] = values\n\n del df[nested_column]\n return df\n\n\ndef _values_dicts_to_df(values_dicts):\n df = pandas.DataFrame(values_dicts, dtype=object)\n if len(df) == 0:\n df = pandas.DataFrame(columns=['datetime', 'value', 'qualifiers', 'last_checked',\n 'last_modified'])\n else:\n df = df.set_index(pandas.DatetimeIndex(pandas.to_datetime(df['datetime'])))\n return df\n\n\ndef _values_df_to_dicts(values_df):\n df = values_df.where(pandas.notnull(values_df), None)\n dicts = df.T.to_dict().values()\n dicts.sort(key=lambda d: d['datetime'])\n return dicts\n\n\ndef _variable_group_to_dict(store, variable_group):\n _v_attrs = variable_group._v_attrs\n variable_dict = dict([\n (key, getattr(_v_attrs, key))\n for key in 
_v_attrs._f_list()\n ])\n values_path = variable_group._v_pathname + '/values'\n values_df = store[values_path]\n variable_dict['values'] = _values_df_to_dicts(values_df)\n\n return variable_dict\n\n\ndef _update_stored_sites(store, sites_dict):\n new_sites_df = _sites_dict_to_df(sites_dict)\n if SITES_TABLE in store:\n sites_df = store[SITES_TABLE]\n new_sites_df = new_sites_df.combine_first(sites_df)\n # explicitly cast 'uses_dst' column back to bool, it gets converted to\n # object dtype in pandas <= 0.11 (s/b fixed in later versions)\n new_sites_df['uses_dst'] = new_sites_df['uses_dst'].astype(bool)\n\n store[SITES_TABLE] = new_sites_df\n" ]
[ [ "pandas.notnull", "pandas.to_datetime", "pandas.isnull", "pandas.io.pytables.get_store", "pandas.DataFrame" ] ]
wjj19950101/Magrank_v1
[ "73ab296f4269ccc9a943b22088c8ca3e1c88af5e" ]
[ "Mag2021-main-code/Proactive caching/FCNN/FCNN.py" ]
[ "# #################################################################\r\n# Codes have been tested successfully on Python 3.6.0 with TensorFlow 1.14.0.\r\n# #################################################################\r\nimport numpy as np\r\nimport sklearn.preprocessing as prep\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import layers\r\nimport random\r\n\r\n\r\ndef xavier_init(fan_in, fan_out, constant=1):\r\n low = -constant * np.sqrt(6 / (fan_in + fan_out))\r\n high = constant * np.sqrt(6 / (fan_in + fan_out))\r\n return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)\r\n\r\n\r\nclass MLP(object):\r\n def __init__(self, layernum, lamda, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):\r\n a0 = str(random.random())\r\n print(a0)\r\n \r\n self.layernum = layernum \r\n self.transfer = transfer_function\r\n self.x = tf.placeholder(tf.float32, [None, self.layernum[0]])\r\n self.keep_prob = tf.placeholder(tf.float32)\r\n \r\n self.weights = dict()\r\n self.hidden = dict()\r\n\r\n for i in range(len(layernum)-1):\r\n if i==0:\r\n self.hidden[str(i)] = self._add_layer_with_loss(self.x, self.layernum[i], self.layernum[i+1],\r\n 0.1, self.keep_prob, lamda,'layer' + str(i+1),a0)\r\n elif i!=len(layernum)-2:\r\n self.hidden[str(i)] = self._add_layer_with_loss(self.hidden[str(i-1)], self.layernum[i], self.layernum[i+1],\r\n 0.1, self.keep_prob, lamda, 'layer' + str(i + 1),a0)\r\n else:\r\n \r\n self.y = self._add_layer_with_loss(self.hidden[str(i-1)], self.layernum[i], self.layernum[i+1],\r\n 0.1, 1, lamda, 'layer' + str(i + 1), a0,transfer_function=self.transfer)\r\n\r\n self.y_ = tf.placeholder(tf.float32, [None, self.layernum[-1]])\r\n \r\n self.cost0 = tf.nn.l2_loss(tf.subtract(self.y_, self.y)) \r\n tf.add_to_collection(a0, self.cost0)\r\n self.cost = tf.add_n(tf.get_collection(a0), name='total_loss')\r\n tf.summary.scalar('loss', self.cost0)\r\n self.optimizer = optimizer.minimize(self.cost)\r\n\r\n init = tf.global_variables_initializer()\r\n self.sess = tf.InteractiveSession()\r\n self.merged = tf.summary.merge_all()\r\n# self.train_writer = tf.summary.FileWriter('2018-05-07/logs/train', self.sess.graph)\r\n# self.test_writer = tf.summary.FileWriter('2018-05-07/logs/test')\r\n \r\n self.sess.run(init)\r\n \r\n def _add_layer_with_loss(self, input, inshape, outshape, stddev, keep_prob, wl, name, a0, transfer_function=tf.nn.relu):\r\n with tf.variable_scope(name, reuse=True):\r\n with tf.variable_scope('weights', reuse=True):\r\n try:\r\n self.weights['w_'+name] = tf.get_variable(name = 'weight')\r\n except:\r\n self.weights['w_'+name] = tf.Variable(tf.truncated_normal([inshape, outshape], stddev=stddev), name = 'weight')\r\n \r\n # self.weights['w_'+name] = tf.contrib.absolute_import\r\n tf.summary.histogram('histogram', self.weights['w_'+name])\r\n with tf.variable_scope('biases', reuse=True):\r\n try:\r\n self.weights['b_'+name] = tf.get_variable(name = 'bias')\r\n except:\r\n self.weights['b_'+name] = tf.Variable(tf.zeros([outshape]), name = 'bias')\r\n tf.summary.histogram('histogram', self.weights['b_'+name])\r\n \r\n Wx_plus_b = tf.matmul(input, self.weights['w_'+name]) + self.weights['b_'+name]\r\n output = transfer_function(Wx_plus_b)\r\n tf.summary.histogram('activations', output)\r\n output = tf.nn.dropout(output, keep_prob)\r\n if wl is not None:\r\n weight_loss = tf.multiply(tf.nn.l2_loss(self.weights['w_'+name]), wl, name='weight_loss')\r\n tf.add_to_collection(a0, weight_loss)\r\n return output\r\n\r\n def 
getoutputs(self, X, keep_prob):\r\n return self.sess.run(self.y, feed_dict={self.x: X, self.keep_prob: keep_prob})\r\n\r\n def getallparas(self):\r\n allparas = []\r\n for i in range(len(self.layernum)-1):\r\n allparas.append(self.weights['w_layer' + str(i + 1)])\r\n allparas.append(self.weights['b_layer' + str(i + 1)])\r\n return self.sess.run(tuple(allparas))\r\n\r\n def getcost(self, X, Y, keep_prob):\r\n return self.sess.run(self.cost0, feed_dict={self.x: X, self.y_: Y, self.keep_prob: keep_prob})\r\n\r\n def variable_summaries(self, var):\r\n # completed: the original body was truncated at 'tf.reduce'; logging mean\r\n # and stddev summaries is the assumed intent of this helper\r\n with tf.name_scope('summaries'):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', mean)\r\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n tf.summary.scalar('stddev', stddev)\r\n\r\n\r\ndef standard_scale(X_train, X_test):\r\n preprocessor = prep.StandardScaler().fit(X_train)\r\n X_train = preprocessor.transform(X_train)\r\n X_test = preprocessor.transform(X_test)\r\n return X_train, X_test, preprocessor\r\n" ]
[ [ "tensorflow.get_variable", "numpy.sqrt", "tensorflow.zeros", "tensorflow.nn.l2_loss", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.get_collection", "tensorflow.subtract", "tensorflow.name_scope", "tensorflow.nn.dropout", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.InteractiveSession", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "tensorflow.add_to_collection", "tensorflow.summary.histogram", "tensorflow.variable_scope", "sklearn.preprocessing.StandardScaler", "tensorflow.random_uniform" ] ]
raspstephan/nwp-downscale
[ "dbb6d560126a8d13a2748bc088f382e68fa7c0b3" ]
[ "notebooks/visualise_predictions.py" ]
[ "import torch\nfrom torch.nn import functional as F\nfrom torch import nn\nfrom pytorch_lightning.core.lightning import LightningModule\nimport pytorch_lightning as pl\nfrom pytorch_lightning import loggers as pl_loggers\n\nimport torch.optim as optim\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom catalyst.data.sampler import DistributedSamplerWrapper\n\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pickle\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda\") \nelse:\n device = torch.device(\"cpu\")\n \nimport json\nimport argparse\nimport sys\n\ndef parseInputArgs():\n\n parser = argparse.ArgumentParser(\n description=\"specify_experiment_parameters\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\"--eval_config\", type=str,\n dest=\"config_path\", help='Path to file containing configuration for the evaluation')\n\n return parser.parse_args()\n\ninput_args = parseInputArgs()\n\n\ndef plot_samples_per_input(data, gen, samples, device, save_dir, pure_sr = False):\n fig, axs = plt.subplots(len(data), samples+2, figsize=(5*samples, len(data)*samples))\n gen_images = np.zeros((len(data),samples,128,128))\n for i, d in enumerate(data):\n for s in range(samples):\n if pure_sr:\n cond = torch.tensor(d[0][s:s+1]).unsqueeze(0).to(device)\n noise = torch.zeros(cond.shape[0], 1, cond.shape[2], cond.shape[3]).to(device)\n else:\n cond = torch.tensor(d[0]).unsqueeze(0).to(device)\n noise = torch.randn(cond.shape[0], 1, cond.shape[2], cond.shape[3]).to(device)\n try:\n pred, _ = gen(cond, noise)\n except:\n pred = gen(cond, noise)\n pred = pred.detach().cpu().numpy()\n gen_images[i, s, :, :] = pred[0,0,:,:]\n \n for i, d in enumerate(data):\n cond = torch.tensor(d[0]).unsqueeze(0).to(device)\n lr = cond[0,0,:,:].detach().cpu().numpy()\n if lr.shape[0]==64:\n lr = lr[24:40, 24:40]\n hr = d[1][0,:,:]\n mn = np.min([np.min(hr), np.min(pred), np.min(gen_images[i,:,:,:])])\n mx = np.max([np.max(hr), np.max(pred), np.max(gen_images[i,:,:,:])])\n im = axs[i,0].imshow(lr, vmin=mn, vmax=mx, cmap='gist_ncar_r')\n# plt.colorbar(im, ax=axs[i,0])\n im = axs[i,1].imshow(hr, vmin=mn, vmax=mx, cmap='gist_ncar_r')\n# plt.colorbar(im, ax=axs[j,0], shrink=0.7)\n for j in range(samples):\n im = axs[i,j+2].imshow(gen_images[i,j,:,:], vmin=mn, vmax=mx, cmap='gist_ncar_r')\n# plt.colorbar(im, ax=axs[j,i], shrink=0.7)\n# plt.show() \n cols = [\"Forecast\", \"Ground Truth\"] + [f\"Prediction {i+1}\" for i in range(samples)]\n for ax, col in zip(axs[0], cols):\n ax.set_title(col)\n plt.tight_layout()\n plt.savefig(save_dir+'sample_predictions.png', dpi=200)\n \n \ndef visualise(input_args):\n # Load Experiment Args and Hyperparameters\n\n args = json.load(open(input_args.config_path))\n parser = argparse.ArgumentParser(args)\n parser.set_defaults(**args)\n args, _ = parser.parse_known_args()\n\n print(\"Args loaded\")\n \n device = torch.device(f'cuda:{args.gpu}')\n print(\"device\", device)\n model_dir = args.save_hparams[\"save_dir\"]+args.save_hparams[\"run_name\"]+str(args.save_hparams[\"run_number\"])+\"/\"\n\n sys.path.append(model_dir)\n\n from run_src.models import GANs\n from run_src.dataloader import TiggeMRMSDataset\n\n #set seed\n torch.manual_seed(args.seed)\n\n ## Load Data and set data params\n ds_test = pickle.load(open(args.data_hparams[\"test_dataset_path\"], \"rb\"))\n \n# sample_indices = 
pickle.load(open(\"./sample_indices.pkl\", 'rb'))\n sample_indices = np.random.choice(1000, size = 40, replace=False)\n ds_test = torch.utils.data.Subset(ds_test, sample_indices)\n \n print(\"Loading data ... \")\n\n gan = GANs[args.gan].load_from_checkpoint(args.model_path)\n \n gen = gan.gen\n gen = gen.to(device)\n gen.train(False);\n\n print(\"Data loading complete\")\n \n if \"super_resolution\" in args:\n plot_samples_per_input(ds_test, gen, 4, device, model_dir, pure_sr=True) \n else:\n plot_samples_per_input(ds_test, gen, 4, device, model_dir)\n \n\nif __name__ == '__main__':\n visualise(input_args)" ]
[ [ "torch.utils.data.Subset", "matplotlib.pyplot.tight_layout", "numpy.random.choice", "numpy.min", "torch.manual_seed", "torch.zeros", "torch.randn", "matplotlib.pyplot.savefig", "torch.tensor", "numpy.max", "torch.cuda.is_available", "torch.device" ] ]
nessessence/ReID_Transitve_Label
[ "e6bb8b51ddc628ebfa2d20d46e39ba224063a3da" ]
[ "main.py" ]
[ "import torch\nfrom modeling import build_model\nfrom dataset import ImageDataset\nfrom torch.utils.data import DataLoader\n\nfrom distance import Mahalanobis_distance\nfrom uploader import upload_multiple_files,upload_to_db\n\ndef dataloader_collate_fn(batch):\n imgs, img_paths = zip(*batch)\n return torch.stack(imgs, dim=0), img_paths\n\ndef process_featerbank(data_loader,dataset,model,cfg):\n if cfg[\"pre_computed\"][\"feature_bank\"]:\n print(f\"loading pre-computed feature_bank from {cfg['pre_computed']['feature_bank']} \")\n feature_bank = torch.load(cfg[\"pre_computed\"][\"feature_bank\"])\n print(\"loading feature_bank complete\")\n return feature_bank\n print(\"start processing feature bank\")\n feature_bank = {} # {img_path:feature}\n batch_size = cfg[\"dataloader\"][\"batch_size\"]\n print(batch_size,len(dataset))\n percent = 0\n with torch.no_grad():\n for i,(img_batch,img_path_batch) in enumerate(data_loader):\n img_batch = img_batch.cuda(0) if torch.cuda.device_count() >= 1 else img_batch\n feature_batch = model(img_batch).half()\n if i % 10 == 0: \n percent += 100*(10*batch_size)/(len(dataset))\n print(f\" processing : {percent}% \")\n feature_bank.update({img_path:feature.half() for img_path,feature in zip(img_path_batch,feature_batch)})\n del feature_batch\n\n print(\"complete processing feature bank\")\n # backup \"feature_bank\"\n torch.save(feature_bank, f\"{cfg['log']['log_dir']}/{cfg['log']['feature_bank']}\")\n return feature_bank \n\ndef process_distancing(feature_bank,cfg):\n if cfg[\"pre_computed\"][\"distance_mat\"]:\n print(f\"loading pre-computed distance_mat from {cfg['pre_computed']['distance_mat']} \")\n distance_mat = torch.load(cfg[\"pre_computed\"][\"distance_mat\"])\n print(\"loading distance_mat complete\")\n return distance_mat\n print(\"start calculating distace/simlarity\")\n distance_mat = Mahalanobis_distance(torch.stack(list(feature_bank.values())))\n torch.save(distance_mat,f\"{cfg['log']['log_dir']}/{cfg['log']['distance_mat']}\")\n print(f\"distance_mat has been saved to {cfg['log']['log_dir']}/{cfg['log']['distance_mat']}\")\n return distance_mat\n\ndef upload_to_storage(img_paths,cfg):\n print(\"start uploading to storage\")\n if not cfg[\"storage\"][\"local\"]: upload_multiple_files(img_paths,cfg[\"storage\"][\"bucket\"])\n\ndef process_argsort(distance_mat,cfg):\n import numpy as np\n if cfg[\"pre_computed\"][\"argsorted_distance\"]:\n print(f\"loading pre-computed argsorted_distance from {cfg['pre_computed']['argsorted_distance']} \")\n argsorted_distance = np.load(cfg[\"pre_computed\"][\"argsorted_distance\"])\n print(\"loading argsorted_distance complete\")\n return argsorted_distance\n print(\"start calculating argsort distance\")\n argsorted_distance = np.argsort(distance_mat.cpu().numpy(),axis=1)\n np.save(f\"{cfg['log']['log_dir']}/{cfg['log']['argsorted_distance']}\",argsorted_distance )\n print(f\"argsorted_distance has been saved to {cfg['log']['log_dir']}/{cfg['log']['argsorted_distance']}\")\n return argsorted_distance\n\ndef upload_to_database(argsorted_distance,img_paths,cfg):\n initial_rank = argsorted_distance[:,:cfg['budget']['k']+1]\n storage = 'local' if cfg[\"storage\"][\"local\"] else fg[\"storage\"][\"bucket\"]\n upload_to_db(cfg[\"db\"],storage,cfg[\"budget\"][\"k\"],initial_rank,img_paths)\n print(\"complete updating db\")\n\ndef main(cfg):\n\n # data loader \n print(\"start DataLoader\")\n print(f\"dataset dir: {cfg['datasets']['dir_path']}\")\n dataset = ImageDataset(cfg)\n data_loader = DataLoader(\n dataset, 
batch_size=cfg[\"dataloader\"][\"batch_size\"], shuffle=False, \n num_workers=cfg[\"dataloader\"][\"num_workers\"],collate_fn=dataloader_collate_fn)\n print(f\"There are {len(dataset)} images to be processed\")\n \n # feature extractor model\n print(\"start initialize model\")\n model = build_model(cfg) \n model.load_param(cfg[\"model\"][\"weight\"])\n model = model.cuda(0) if torch.cuda.device_count() >= 1 else img_batch\n model.eval()\n\n feature_bank = process_featerbank(data_loader,dataset,model,cfg)\n img_paths = list(feature_bank.keys())\n distance_mat = process_distancing(feature_bank,cfg)\n del feature_bank\n upload_to_storage(img_paths,cfg)\n argsorted_distance = process_argsort(distance_mat,cfg)\n del distance_mat\n upload_to_database(argsorted_distance,img_paths,cfg)\n del argsorted_distance\n print(\"processing has been completed\")\n\n\n\n\n\n # if cfg[\"pre_computed\"][\"feature_bank\"]:\n # print(f\"loading pre-computed feature_bank from {cfg['pre_computed']['feature_bank']} \")\n # feature_bank = torch.load(cfg[\"pre_computed\"][\"feature_bank\"])\n # print(\"loading feature_bank complete\")\n # else:\n # print(\"start processing feature bank\")\n # feature_bank = {} # {img_path:feature}\n\n # batch_size = cfg[\"dataloader\"][\"batch_size\"]\n # percent = 0\n # with torch.no_grad():\n # for i,(img_batch,img_path_batch) in enumerate(data_loader):\n # img_batch = img_batch.cuda(0) if torch.cuda.device_count() >= 1 else img_batch\n # #print(img_batch.type())\n # feature_batch = model(img_batch).half()\n # if i % 10 == 0: \n # percent += 100*(10*batch_size)/(len(dataset))\n # print(f\" processing : {percent}% \")\n\n # feature_bank.update({img_path:feature.half() for img_path,feature in zip(img_path_batch,feature_batch)})\n # del feature_batch\n\n # print(\"complete processing feature bank\")\n # # backup \"feature_bank\"\n # torch.save(feature_bank, f\"{cfg['log']['log_dir']}/{cfg['log']['feature_bank']}\")\n # print(f\"feature bank has been saved to {cfg['log']['log_dir']}/{cfg['log']['feature_bank']}\")\n\n # print(\"start calculating distace/simlarity\")\n # distance_mat = Mahalanobis_distance(torch.stack(list(feature_bank.values())))\n # torch.save(distance_mat,f\"{cfg['log']['log_dir']}/{cfg['log']['distance_mat']}\")\n # print(f\"distance_mat has been saved to {cfg['log']['log_dir']}/{cfg['log']['distance_mat']}\")\n \n # # db:\n # print(\"start uploading process\")\n # img_paths = list(feature_bank.keys())\n # del feature_bank\n # if not cfg[\"storage\"][\"local\"]: upload_multiple_files(img_paths,cfg[\"storage\"][\"bucket\"])\n # # initial_rank = torch.argsort(distance_mat,dim=1)[:,:cfg['budget']['k']+1].cpu().numpy()\n # import numpy as np\n # argsorted_distance = np.argsort(distance_mat.cpu().numpy(),axis=1)[:,:cfg['budget']['k']+1]\n # np.save(f\"{cfg['log']['log_dir']}/{cfg['log']['argsorted_distance']}\",argsorted_distance )\n # print(f\"argsorted_distance has been saved to {cfg['log']['log_dir']}/{cfg['log']['argsorted_distance']}\")\n # initial_rank = argsorted_distance[:,:cfg['budget']['k']+1]\n # del distance_mat\n # storage = 'local' if cfg[\"storage\"][\"local\"] else fg[\"storage\"][\"bucket\"]\n # upload_to_db(cfg[\"db\"],storage,cfg[\"budget\"][\"k\"],initial_rank,img_paths)\n # print(\"complete uploading process\")\n\n\n\nif __name__ == '__main__':\n # import argparse \n # parser = argparse.ArgumentParser(description=\"Transitive Rebeler Platform\")\n # parser.add_argument(\"-fb\",\"--feature_bank\", default=\"\", help=\"path to feature_bank\", 
type=str)\n # args = parser.parse_args()\n # load cfg\n import yaml\n with open(\"config.yaml\") as cfg_file:\n cfg = yaml.load(cfg_file, Loader=yaml.FullLoader)\n # if not args.feature_bank: cfg['pre_computed']['feature_bank'] : cfg['pre_computed']['feature_bank'] = parser.feature_bank\n\n main(cfg)\n\n\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "numpy.save", "torch.no_grad", "torch.stack", "numpy.load", "torch.cuda.device_count", "torch.save" ] ]
StefanosChaliasos/types-bug-study-artifact
[ "eafcb89bf2b383b3fb110177ad404568c9ee0420" ]
[ "scripts/rq4.py" ]
[ "#! /usr/bin/env python3\nimport argparse\nimport json\nimport statistics\nimport os\n\nfrom collections import defaultdict\n\nimport matplotlib.pylab as plt\nimport matplotlib.offsetbox as offsetbox\nimport seaborn as sns\nimport pandas as pd\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description='Generate figures and tables of RQ4 (characteristics).')\n parser.add_argument(\"characteristics\", help=\"JSON with characteristics.\")\n parser.add_argument(\"bugs\", help=\"JSON with bugs.\")\n parser.add_argument(\n \"test_cases\",\n help=\"Directory that contains the test cases of the bugs.\")\n parser.add_argument(\n \"--output\",\n default=\"characteristics.pdf\",\n help=\"Filename to save the figure.\")\n parser.add_argument(\n \"--all\",\n action=\"store_true\",\n help=\"Print table with the distribution of all characteristics\")\n parser.add_argument(\n \"--limit\",\n type=int,\n default=5,\n help=\"The number of the most / least frequent characteristics (default: 5)\")\n parser.add_argument(\n \"--top\",\n type=int,\n default=5,\n help=\"The number of the most frequent characteristics per language (default: 5)\")\n return parser.parse_args()\n\n\ndef construct_dataframe(data):\n characteristics = []\n dataframes = {}\n for key, value in data.items():\n framedata = []\n for subkey, subvalue in value['subcategories'].items():\n framedata.append({\n 'Characteristic': subkey,\n 'Bug prevalence': 100 * (int(subvalue['total']) / 320),\n })\n if 'is_common' not in subvalue:\n print(subkey)\n if subvalue['is_common']:\n characteristics.append((subkey, subvalue['is_common'],\n 100 * (int(subvalue['total']) / 320)))\n framedata = sorted(framedata, key=lambda k: k['Bug prevalence'],\n reverse=True)[:4]\n percentage = str(round((int(value['total']) / 320) * 100, 2))\n dataframes[key + \" (\" + percentage + \"%)\"] = pd.DataFrame(framedata)\n return dataframes, characteristics\n\n\ndef plot_fig(dataframes, output):\n plt.style.use('ggplot')\n sns.set(style=\"whitegrid\")\n plt.rcParams['font.family'] = 'DejaVu Sans'\n plt.rcParams['figure.figsize'] = (8, 4.5)\n plt.rcParams['axes.labelsize'] = 8\n plt.rcParams['ytick.labelsize'] = 6.8\n plt.rcParams['xtick.labelsize'] = 6\n plt.rcParams['font.serif'] = 'DejaVu Sans'\n plt.rcParams['font.monospace'] = 'Inconsolata Medium'\n plt.rcParams['axes.labelweight'] = 'bold'\n\n fig, axs = plt.subplots(nrows=8, sharex=True)\n\n for i, (key, dataframe) in enumerate(dataframes.items()):\n dataframe = dataframe.sort_values(\n 'Bug prevalence', ascending=True)\n dataframe.plot.barh(x='Characteristic', y='Bug prevalence',\n color='grey', ax=axs[i])\n ob = offsetbox.AnchoredText(key, loc=1,\n prop=dict(color='black', fontsize=7))\n ob.patch.set(boxstyle='round', color='lightgrey', alpha=1)\n axs[i].add_artist(ob)\n axs[i].set_ylabel('')\n axs[i].set_xlabel('Bug prevalence (%)')\n axs[i].get_legend().remove()\n axs[i].set_xlim([0, 70])\n for line in axs[i].get_xgridlines():\n line.set_linewidth(0.3)\n for line in axs[i].get_ygridlines():\n line.set_linewidth(0.3)\n [i.set_linewidth(0.3) for i in axs[i].spines.values()]\n plt.savefig(output, format='pdf', bbox_inches='tight',\n pad_inches=0)\n\n\ndef print_table(data):\n res = []\n for category, values in data.items():\n for subcategory, cat_values in values['subcategories'].items():\n res.append((subcategory, category, cat_values['total'],\n cat_values['is_common']))\n for subsubcat, total in cat_values['subcategories'].items():\n if subsubcat == \"is_common\":\n continue\n res.append((subsubcat, 
category, total, True))\n res = sorted(res, key=lambda x: (x[1], -x[2]))\n row_format = \"{:<33}\" + \"{:<35}\" + \"{:<20}\" + \"{:<20}\"\n print()\n print(\"Distribution of Language Features (corresponding to a complete version of Figure 15)\")\n print(93 * \"=\")\n print(row_format.format(\"Feature\", \"Category\", \"# Test Cases\", \"Common\"))\n print(93 * \"-\")\n for row in res:\n print(row_format.format(row[0], row[1], row[2], str(row[3])))\n\n\ndef print_generic_stats_table(compilable, non_compilable, locs, classes,\n methods, calls):\n print(\"General Statistics on Test Case Characteristics (Table 2)\")\n row_format = \"{:<33}\" + \"{:>30}\"\n print(63 * \"=\")\n print(row_format.format(\n \"Compilable test cases\",\n \"{} / 320 ({:.2f}%)\".format(compilable, (compilable / 320) * 100)))\n print(row_format.format(\n \"Non-compilable test cases\",\n \"{} / 320 ({:.2f}%)\".format(\n non_compilable, (non_compilable / 320) * 100)))\n print(63 * \"-\")\n print(row_format.format(\"LoC (mean)\", \"{:.2f}\".format(locs[\"mean\"])))\n print(row_format.format(\"LoC (median)\", locs[\"median\"]))\n print(63 * \"-\")\n print(row_format.format(\n \"Number of class decls (mean)\", \"{:.2f}\".format(classes[\"mean\"])))\n print(row_format.format(\n \"Number of class decls (median)\", classes[\"median\"]))\n print(63 * \"-\")\n print(row_format.format(\n \"Number of method decls (mean)\", \"{:.2f}\".format(methods[\"mean\"])))\n print(row_format.format(\n \"Number of method decls (median)\", methods[\"median\"]))\n print(row_format.format(\n \"Number of method calls (mean)\", \"{:.2f}\".format(calls[\"mean\"])))\n print(row_format.format(\n \"Number of method calls (median)\", calls[\"median\"]))\n print(63 * \"-\")\n print()\n\n\ndef print_most_least_chars_table(characteristics, limit):\n row_format = \"{:<33}\" + \"{:>30}\"\n print(\"Most Frequent Features (Table 3)\")\n print(63 * \"=\")\n most = characteristics[-limit:]\n most.sort(reverse=True, key=lambda x: x[2])\n for char in most:\n print(row_format.format(char[0], \"{:.2f}%\".format(char[2])))\n print()\n print(\"Least frequent features (Table 3)\")\n print(63 * \"=\")\n least = characteristics[:limit]\n least.sort(reverse=True, key=lambda x: x[2])\n for char in least:\n print(row_format.format(char[0], \"{:.2f}%\".format(char[2])))\n print()\n\n\ndef print_most_per_lang(data, limit):\n def _get(thunk, default=\"-\"):\n try:\n return thunk()\n except IndexError:\n return default\n\n row_format = (\"{:<30}\" + \"{:>7}\") * 4\n print(\"Most Bug-Triggering Features per Language (Table 4)\")\n print(155 * \"=\")\n data = list(zip(data.items()))\n lang1 = data[0][0][1]\n lang1_name = data[0][0][0]\n lang1.sort(reverse=True, key=lambda x: x[1])\n lang2 = data[1][0][1]\n lang2_name = data[1][0][0]\n lang2.sort(reverse=True, key=lambda x: x[1])\n lang3 = data[2][0][1]\n lang3_name = data[2][0][0]\n lang3.sort(reverse=True, key=lambda x: x[1])\n lang4 = data[3][0][1]\n lang4_name = data[3][0][0]\n lang4.sort(reverse=True, key=lambda x: x[1])\n print(\"{}{}{}{}\".format(\n lang1_name.center(38), lang2_name.center(38),\n lang3_name.center(38), lang4_name.center(38)\n ))\n print(155 * \"-\")\n for i in range(limit):\n print(row_format.format(\n _get(lambda: lang1[i][0]), \"{}{:.2f}% | \".format(\n \" \" if _get(lambda: lang1[i][1], 0) < 10 else \"\",\n _get(lambda: lang1[i][1], 0)),\n _get(lambda: lang2[i][0]), \"{}{:.2f}% | \".format(\n \" \" if _get(lambda: lang2[i][1], 0) < 10 else \"\",\n _get(lambda: lang2[i][1], 0)),\n _get(lambda: 
lang3[i][0]), \"{}{:.2f}% | \".format(\n \" \" if _get(lambda: lang3[i][1], 0) < 10 else \"\",\n _get(lambda: lang3[i][1], 0)),\n _get(lambda: lang4[i][0]), \"{}{:.2f}% | \".format(\n \" \" if _get(lambda: lang4[i][1], 0) < 10 else \"\",\n _get(lambda: lang4[i][1], 0))\n ))\n print()\n\n\ndef print_categories_stats(categories):\n row_format = \"{:<33}\" + \"{:>7}\"\n print(\"Frequency of Characteristic Categories (see Section 3.4.2)\")\n print(40 * \"=\")\n for row in categories:\n print(row_format.format(row[0], \"{:.2f}%\".format(row[1])))\n print()\n\n\ndef print_comparative_stats(characteristics):\n print(\"Comparative Analysis Stats (see Section 3.4.3)\")\n print(43 * \"=\")\n print(\"{}: {:.2f}\".format(\n \"Scala Implicits\",\n next(i[1] for i in characteristics['Scala'] if i[0] == 'Implicits')))\n print(\"{}: {:.2f}\".format(\n \"Scala Higher-kinded types\",\n next(i[1] for i in characteristics['Scala']\n if i[0] == 'Higher-kinded type')))\n print(\"{}: {:.2f}\".format(\n \"Scala Pattern matching\",\n next(i[1] for i in characteristics['Scala']\n if i[0] == 'Pattern matching')))\n print(\"{}: {:.2f}\".format(\n \"Scala Algebraic Data Types\",\n next(i[1] for i in characteristics['Scala']\n if i[0] == 'Algebraic data type')))\n print(\"{}: {:.2f}\".format(\n \"Kotlin Nullable types\",\n next(i[1] for i in characteristics['Kotlin']\n if i[0] == 'Nullable type')))\n print(\"{}: {:.2f}\".format(\n \"Kotlin Extension function / property\",\n next(i[1] for i in characteristics['Kotlin']\n if i[0] == 'Extension function / property')))\n\n\ndef create_dict():\n return {\"total\": [], \"java\": [], \"groovy\": [], \"kotlin\": [], \"scala\": []}\n\n\ndef find_lang(string):\n if \"java\" in string:\n return \"java\"\n elif \"groovy\" in string:\n return \"groovy\"\n elif \"kotlin\" in string:\n return \"kotlin\"\n else:\n return \"scala\"\n\n\ndef get_stats(data):\n stats = {}\n for k in data.keys():\n stats[k] = {\n \"max\": max(data[k]),\n \"min\": min(data[k]),\n \"mean\": statistics.mean(data[k]),\n \"median\": int(statistics.median(data[k])),\n \"sd\": statistics.stdev(data[k])\n }\n return stats\n\n\ndef get_compilable_non_compilable(json_bugs):\n compilable = sum(1 for v in json_bugs.values() if v['is_correct'])\n non_compilable = sum(1 for v in json_bugs.values() if not v['is_correct'])\n return compilable, non_compilable\n\n\ndef compute_test_cases_stats(test_cases):\n locs = create_dict()\n classes = create_dict()\n methods = create_dict()\n calls = create_dict()\n for f in filter(lambda x: x[0].count(\"/\") == 3, os.walk(test_cases)):\n lang = find_lang(f[0])\n stats_file = os.path.join(f[0], \"stats_locs.json\")\n with open(stats_file, 'r') as f:\n data = json.load(f)\n loc = int(data['loc'])\n data = data['declarations']\n calls_n = data['calls']\n classes[\"total\"].append(data['classes'])\n classes[lang].append(data['classes'])\n methods[\"total\"].append(data['methods/functions'])\n methods[lang].append(data['methods/functions'])\n calls[\"total\"].append(calls_n)\n calls[lang].append(calls_n)\n locs[\"total\"].append(loc)\n locs[lang].append(loc)\n locs_stats = get_stats(locs)\n classes_stats = get_stats(classes)\n methods_stats = get_stats(methods)\n calls_stats = get_stats(calls)\n return locs_stats, classes_stats, methods_stats, calls_stats\n\n\ndef compute_chars_per_lang(bugs):\n chars = {\n \"Java\": defaultdict(lambda: 0),\n \"Groovy\": defaultdict(lambda: 0),\n \"Kotlin\": defaultdict(lambda: 0),\n \"Scala\": defaultdict(lambda: 0)\n }\n stats = {\n \"Java\": [],\n 
\"Groovy\": [],\n \"Kotlin\": [],\n \"Scala\": []\n }\n for values in bugs.values():\n for char in values[\"chars\"][\"characteristics\"]:\n chars[values[\"language\"]][char] += 1\n for lang, characteristics in chars.items():\n for c, total in characteristics.items():\n stats[lang].append((c, (total/80.0) * 100))\n return stats\n\n\ndef get_categories_stats(chars):\n res = [(category, (values['total'] / 320) * 100)\n for category, values in chars['Bug Causes'].items()]\n res.sort(reverse=True, key=lambda x: x[1])\n return res\n\n\ndef main():\n args = get_args()\n with open(args.bugs, 'r') as f:\n json_bugs = json.load(f)\n with open(args.characteristics, 'r') as f:\n json_chars = json.load(f)\n\n compilable, non_compilable = get_compilable_non_compilable(json_bugs)\n locs, classes, methods, calls = compute_test_cases_stats(args.test_cases)\n stats_per_lang = compute_chars_per_lang(json_bugs)\n categories = get_categories_stats(json_chars)\n\n dataframes, characteristics = construct_dataframe(json_chars['Bug Causes'])\n characteristics = sorted(characteristics, key=lambda tup: tup[2])\n\n print_generic_stats_table(compilable, non_compilable,\n locs['total'], classes['total'],\n methods['total'], calls['total'])\n print_most_least_chars_table(characteristics, args.limit)\n print_most_per_lang(stats_per_lang, args.top)\n print_categories_stats(categories)\n print_comparative_stats(stats_per_lang)\n\n plot_fig(dataframes, args.output)\n\n if args.all:\n print_table(json_chars['Bug Causes'])\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pylab.style.use", "matplotlib.pylab.savefig", "matplotlib.pylab.subplots", "pandas.DataFrame" ] ]
zhaomh1998/TubularRobotics
[ "226e8711061a3fc53bfaee12b02a12e78388c036" ]
[ "wirelessMain.py" ]
[ "import WirelessTool, socket, traceback, colorama, time\nimport numpy as np\ncolorama.init()\n\nTCPServer = WirelessTool.TCPEchoServer(3000,0.1)\nUltrasonic = WirelessTool.TCPClient('127.0.0.1',3001,0.1)\nEncoder = WirelessTool.TCPClient('127.0.0.1',3002,0.1)\nMainController = WirelessTool.TCPClient('127.0.0.1',3003,3)\nwhile(not TCPServer.connect(10)):\n print(\"[INFO]\\tWaiting User Connection\")\n# TODO: If user not connected, go to auto mode\n# Connected\nU0Readings = np.zeros(9)\nU1Readings = np.zeros(9)\nU2Readings = np.zeros(9)\nU0Buffer = np.zeros((10,4))\nU1Buffer = np.zeros((10,4))\nU2Buffer = np.zeros((10,4))\nE0Buffer = 0\nE1Buffer = 0\nU0Count = 0\nU1Count = 0\nU2Count = 0\n\n\ndef decodeCommand(rawData):\n recvLength = len(rawData)\n if(recvLength < 1):\n return [('Z', 0, '0')]\n elif(recvLength == 1):\n if(rawData == 'S'):\n return [('S', 0, '0')]\n else:\n return [('Z', 0, '0')]\n else:\n rawData = rawData.split('\\t')\n indexCounter = 0\n commandList = []\n for element in rawData:\n if(len(element) == 0): # Handles the extra '' sometime when spliting\n continue\n elif(element[0] == 'U' or element[0] == 'E'):\n commandList.append((element[0], int(element[1]), element[2:]))\n elif(element[0] == 'F' or element[0] == 'B' or element[0] == 'L' or element[0] == 'R' or element[0] == 'A'):\n commandList.append((element[0], 0, element[1:]))\n else:\n print('[ERROR]\\tUnexpected command received:->' + element + '<-')\n return commandList\n\ndef prepareData(command,data):\n dataTime = time.time() - initTime\n if (command == 'U0'):\n global U0Count, E0Buffer, E1Buffer\n U0Buffer[U0Count][0] = float(data)\n U0Buffer[U0Count][1] = dataTime\n U0Buffer[U0Count][2] = E0Buffer\n U0Buffer[U0Count][3] = E1Buffer\n if(U0Count == 9): # Buffer full, send\n U0Count = -1\n sendArray('U0', U0Buffer)\n U0Count += 1\n elif (command == 'U1'):\n global U1Count, E0Buffer, E1Buffer\n U1Buffer[U1Count][0] = float(data)\n U1Buffer[U1Count][1] = dataTime\n U1Buffer[U1Count][2] = E0Buffer\n U1Buffer[U1Count][3] = E1Buffer\n if(U1Count == 9): # Buffer full, send\n U1Count = -1\n sendArray('U1', U1Buffer)\n U1Count += 1\n elif (command == 'U2'):\n global U2Count, E0Buffer, E1Buffer\n U2Buffer[U2Count][0] = float(data)\n U2Buffer[U2Count][1] = dataTime\n U2Buffer[U2Count][2] = E0Buffer\n U2Buffer[U2Count][3] = E1Buffer\n if(U2Count == 9): # Buffer full, send\n U2Count = -1\n sendArray('U2', U2Buffer)\n U2Count += 1\n elif (command == 'E0'):\n E0Buffer = float(data)\n elif (command == 'E1'):\n E1Buffer = float(data)\n\ndef sendArray(dataTypeName, dataArr):\n dataStream = dataTypeName + '\\t' + np.array_str(dataArr[:,0]) + '\\t' + np.array_str(dataArr[:,1]) + '\\t' + np.array_str(dataArr[:,2]) + '\\t' + np.array_str(dataArr[:,3])\n TCPServer.write(dataStream)\n\ninitTime = time.time()\nUltrasonic.read() # Clear ultrasonic buffer\nwhile(True):\n try:\n userCommand = TCPServer.read()\n for command,index,data in decodeCommand(userCommand):\n if(command == 'Z'):\n pass\n elif(command == 'F'):\n print(colorama.Fore.YELLOW + '[MANUAL]\\t' + colorama.Style.RESET_ALL + '\\tForward' + data)\n # TCPServer.write('OK')\n MainController.write(command + data + '\\t')\n elif(command == 'B'):\n print(colorama.Fore.YELLOW + '[MANUAL]\\t' + colorama.Style.RESET_ALL + '\\tBackward ' + data)\n # TCPServer.write('OK')\n MainController.write(command + data + '\\t')\n elif(command == 'L'):\n print(colorama.Fore.YELLOW + '[MANUAL]\\t' + colorama.Style.RESET_ALL + '\\tLeft ' + data)\n # TCPServer.write('OK')\n 
MainController.write(command + data + '\\t')\n            elif(command == 'R'):\n                print(colorama.Fore.YELLOW + '[MANUAL]\\t' + colorama.Style.RESET_ALL + '\\tRight ' + data)\n                # TCPServer.write('OK')\n                MainController.write(command + data + '\\t')\n            elif(command == 'S'):\n                print(colorama.Fore.YELLOW + '[MANUAL]\\t' + colorama.Style.RESET_ALL + '\\tStop')\n                # TCPServer.write('OK')\n                MainController.write(command + data + '\\t')\n            elif(command == 'A'):\n                print(colorama.Fore.YELLOW + '[MANUAL]\\t' + colorama.Style.RESET_ALL + '\\tAutomatic Mode switch: ' + data)\n                # TCPServer.write('OK')\n                MainController.write(command + data + '\\t')\n\n        UData = Ultrasonic.read()\n        for command, index, data in decodeCommand(UData):\n            if(command != 'Z'):\n                # prepareData(command + str(index), data)\n                if(index == 0):\n                    U0Readings = np.append(np.delete(U0Readings, 0), int(data))\n                    # print(np.median(U0Readings))\n                    prepareData('U0', np.median(U0Readings))\n                    MainController.write('U' + str(np.median(U0Readings)) + '\\t')\n                elif(index == 1):\n                    U1Readings = np.append(np.delete(U1Readings, 0), int(data))\n                    # print('\\t' + str(np.median(U1Readings)))\n                    prepareData('U1', np.median(U1Readings))\n                elif(index == 2):\n                    U2Readings = np.append(np.delete(U2Readings, 0), int(data))\n                    # print('\\t\\t' + str(np.median(U2Readings)))\n                    prepareData('U2', np.median(U2Readings))\n\n        EData = Encoder.read()\n        for command, index, data in decodeCommand(EData):\n            if(command != 'Z'):\n                prepareData(command + str(index), data)\n\n    except BaseException as e:\n        # BaseException has no .message attribute in Python 3; format the exception with str()\n        print(colorama.Fore.RED + '[ERROR]\\t' + colorama.Style.RESET_ALL + str(e))\n        print(colorama.Fore.RED + '[ERROR]\\t' + colorama.Style.RESET_ALL + traceback.format_exc())\n        TCPServer.close()\n        Ultrasonic.close()\n        Encoder.close()\n        MainController.close()\n        break" ]
[ [ "numpy.array_str", "numpy.zeros", "numpy.delete", "numpy.median" ] ]
alessandrobalata/pyrlmc
[ "493d1ad5378823a9bbb032077bea2838db76602c" ]
[ "objects/control.py" ]
[ "from objects.cont_value import ContValue\nfrom objects.controlled_process import ControlledProcess\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom problems.problem import Problem\n\n\nclass Control:\n '''\n Control object to be used both backward and forward\n '''\n\n def __init__(self, problem: Problem):\n self.values = np.zeros((problem.N + 1, problem.M)) * np.nan\n self.u_max = problem.u_max\n self.u_min = problem.u_min\n self.U = problem.U\n self.running_reward = problem.running_reward\n self.dt = problem.dt\n self.M = problem.M\n self.optimization_type = problem.optimization_type\n self.step_gradient = problem.step_gradient\n self.epsilon_gradient = problem.epsilon_gradient\n self.first_derivative = problem.first_derivative\n self.second_derivative = problem.second_derivative\n self.N = problem.N\n\n def compute(self, n: int, x: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> np.ndarray:\n '''\n\n :param n:\n :param x:\n :param cont_value:\n :param coeff:\n :return:\n '''\n if self.optimization_type == 'extensive':\n return self._extensive_search(n, x, cont_value, coeff)\n elif self.optimization_type == 'gradient' and n < self.N - 3:\n u_tp1 = x * 0\n return self._gradient_descent(n, x, u_tp1, cont_value, coeff)\n return self._extensive_search(n, x, cont_value, coeff)\n\n def _gradient_descent(self, n: int, x: np.ndarray, u_tp1: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> \\\n np.ndarray:\n '''\n\n :param x:\n :param coeff:\n :param u_tp1:\n :return:\n '''\n u = u_tp1\n convergence = False\n while not convergence:\n tmp = u - self.__ratio_derivatives(n, x, u, cont_value, coeff) * self.step_gradient\n variation = np.abs(tmp - u) / (1 + np.abs(u))\n convergence = variation.all() < self.epsilon_gradient\n u = tmp\n self.values[n, :] = u\n return self.values[n, :].reshape(1, self.M)\n\n def __ratio_derivatives(self, n: int, x: np.ndarray, u: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> \\\n np.ndarray:\n '''\n\n :param x:\n :param u:\n :param coeff:\n :return:\n '''\n numerator = cont_value.derivative(n, x, u, coeff) + self.first_derivative(n, x, u) * self.dt\n denumerator = cont_value.second_derivative(n, x, u, coeff) + self.second_derivative(n, x, u) * self.dt\n return numerator / denumerator\n\n def _extensive_search(self, n: int, x: np.ndarray, cont_value: ContValue, coeff: np.ndarray) -> np.ndarray:\n '''\n Computes the optimal control by testing a number of control values in the interval u_min, u_max\n :param n: time step\n :param x: state vector\n :param cont_value: continuation value object\n :param coeff: vector of regression coefficients\n :return: control vector\n '''\n print('computing the control')\n test = np.linspace(self.u_min, self.u_max, self.U).reshape(1, self.U)\n idx = np.argmin(self.running_reward(x, test.T).T * self.dt +\n cont_value.compute_batch(n, x.reshape(1, self.M), test, coeff),\n axis=1)\n self.values[n, :] = test[0, idx]\n return self.values[n, :].reshape(1, self.M)\n\n def plot(self) -> None:\n '''\n Plots the control process over time\n :return: None\n '''\n plt.figure()\n plt.plot(self.values)\n plt.xlabel('time step')\n plt.ylabel('control value')\n plt.title('Control Process over time')\n\n def scatter(self, controlled_process: ControlledProcess, time: int) -> None:\n '''\n Plots the control process against the values of the controlled process at a given time\n :return: None\n '''\n plt.figure()\n plt.plot(controlled_process.values[time, :], self.values[time, :], 'o')\n plt.xlabel('process value')\n 
plt.ylabel('control value')\n plt.title('Control vs. Controlled Process')\n" ]
[ [ "numpy.abs", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
ferrocactus/CellarV
[ "069044b742a1ae5dcc494606ee5d975987e620f1" ]
[ "controller/data_loader.py" ]
[ "import os\nimport gc\n\nimport numpy as np\nimport dash\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\n\nfrom app import app, logger, dbroot\nfrom controller.cellar.utils.exceptions import UserError\nfrom .cellar.core import read_adata, cl_add_gene_symbol\nfrom .cellar.utils.misc import is_sparse\nfrom .multiplexer import MultiplexerOutput\nfrom .notifications import _prep_notification\n\n\[email protected](\n Output(\"shape-signal-load\", \"data\"),\n Output(\"feature-list-signal\", \"data\"),\n Output(\"dataset-load\", \"style\"),\n Output(\"data-loaded-plot-signal\", \"data\"),\n Output(\"data-loaded-annotation-table-signal\", \"data\"),\n Output(\"main-data-load-clean\", \"data\"),\n Output(\"side-data-load-clean\", \"data\"),\n MultiplexerOutput(\"main-other-features-change\", \"data\"),\n MultiplexerOutput(\"side-other-features-change\", \"data\"),\n MultiplexerOutput(\"push-notification\", \"data\"),\n\n Input(\"load-dataset-btn\", \"n_clicks\"),\n State(\"dataset-dropdown\", \"value\"),\n State(\"active-plot\", \"data\"),\n prevent_initial_call=True\n)\ndef load_dataset(n1, dname, actp):\n \"\"\"\n Loads dataset to the active plot given its path.\n \"\"\"\n ctx = dash.callback_context\n if not ctx.triggered or n1 is None:\n raise PreventUpdate\n\n an = 'a1' if actp == 1 else 'a2'\n\n # Initialize empty dataset\n dbroot.adatas[an] = {}\n gc.collect()\n\n try:\n adata = read_adata(dname)\n if adata.shape[0] <= 2:\n raise UserError(\"Dataset contains less than 3 points.\")\n dbroot.adatas[an]['adata'] = adata\n except Exception as e:\n logger.error(str(e))\n error_msg = \"Error encountered while reading file. Maybe the file \" +\\\n \"is not formatted properly, or invalid entries were found.\"\n logger.error(error_msg)\n return [dash.no_update] * 9 + [_prep_notification(error_msg, \"danger\")]\n\n dbroot.adatas[an]['name'] = os.path.splitext(\n os.path.basename(dname))[0]\n logger.info(f\"Read {dname} info {an}.\")\n if is_sparse(dbroot.adatas[an]['adata'].X):\n logger.info(\"Found Sparse Matrix.\")\n\n t1 = 1 if an == 'a1' else dash.no_update\n t2 = 1 if an == 'a2' else dash.no_update\n\n return 1, 1, {}, 1, 1, t1, t2, t1, t2, dash.no_update\n\n\ndef _prettify_time(s):\n s = int(np.round(s))\n hrs, s = divmod(s, 3600)\n m, s = divmod(s, 60)\n t = f\"{hrs}h \" if hrs > 0 else \"\"\n t += f\"{m}m \" if m > 0 else \"\"\n t += f\"{s}s\"\n return t\n\n\ndef _get_singler_warning(nos, nof):\n nos, nof = nos / 1000, nof / 1000\n singler = 120 if nos < 5 else 0.18 * nos**2 + 17 * nos + 28\n msg = \"Running SingleR on this dataset can \" + \\\n \"take between \" + _prettify_time(singler) + \" to \" + \\\n _prettify_time(2 * singler) + \". Do you \" + \\\n \"want to continue?\"\n return msg\n\n\ndef _get_run_times(nos, nof):\n nos /= 1000\n nof /= 1000\n\n table_header = [\n html.Thead(html.Tr([html.Th(\"Method\"), html.Th(\"Est. 
Runtime\")]))\n ]\n\n # Assume quadratic model on data\n umap = 5 if nos < 5 else 0.002 * nos**2 + 0.21 * nos + 8\n if nof > 10:\n umap *= (nof / 5)\n umaprow = html.Tr([html.Td(\"PCA (40 PC) + UMAP\"), html.Td(\n _prettify_time(umap))])\n\n # Assume quadratic model on data\n tsne = 15 if nos < 5 else 0.02 * nos**2 + 0.13 * nos + 20\n if nof > 10:\n tsne *= (nof / 5)\n tsnerow = html.Tr([html.Td(\"PCA (40 PC) + t-SNE\"), html.Td(\n _prettify_time(tsne))])\n\n pca = 1 if nos < 5 else 0.00031 * nos ** 2 + 0.0465 * nos + 0.97\n if nof > 10:\n pca *= (nof / 5)\n pcarow = html.Tr([html.Td(\"PCA (40 PC) + PCA\"), html.Td(\n _prettify_time(pca))])\n\n # Assume linear model on data (since using approx nn)\n leiden = 2 if nos < 5 else 0.7 * nos\n leidenrow = html.Tr([html.Td(\"Leiden Clustering (auto + max iter)\"),\n html.Td(_prettify_time(leiden))])\n\n # Assume linear model on data\n de = 1 if nos < 5 else 1.4 * nos\n if nof > 5:\n de *= (nof / 5)\n derow = html.Tr([html.Td(\"DE Analysis (1 vs rest)\"), html.Td(\n _prettify_time(de))])\n\n # Need to run 2 pca's and 2 umaps so this should be a good approx\n scanpyrow = html.Tr([html.Td(\"Scanpy Ingest (Label Transfer)\"), html.Td(\n _prettify_time(2 * umap) + \" - \" + _prettify_time(4 * umap))])\n\n # Assume quadratic model on data\n singler = 120 if nos < 5 else 0.18 * nos**2 + 17 * nos + 28\n singlerrow = html.Tr([html.Td(\"SingleR (Label Transfer)\"), html.Td(\n _prettify_time(singler) + \" - \" + _prettify_time(2 * singler))])\n\n table_body = [html.Tbody([\n umaprow, tsnerow, pcarow, leidenrow, derow, scanpyrow, singlerrow])]\n table = dbc.Table(table_header + table_body, bordered=False)\n return table\n\n\[email protected](\n Output(\"no-samples\", \"children\"),\n Output(\"no-features\", \"children\"),\n Output(\"run-time-popover-body\", \"children\"),\n\n Input(\"shape-signal-load\", \"data\"),\n Input(\"shape-signal-load-prep\", \"data\"),\n Input(\"shape-signal-load-prep-atac\", \"data\"),\n Input(\"shape-signal-atoggle\", \"data\"),\n\n State(\"active-plot\", \"data\"),\n prevent_initial_call=True\n)\ndef update_shape(s1, s2, s3, s4, actp):\n \"\"\"\n When a new dataset is loaded, or the active plot is switched,\n return the shape of the data. 
Otherwise, return N/A.\n \"\"\"\n nos, nof = 'N/A', 'N/A'\n\n ctx = dash.callback_context\n if not ctx.triggered or (s1 is None and s2 is None and s3 is None):\n raise PreventUpdate\n\n an = 'a1' if actp == 1 else 'a2'\n if an not in dbroot.adatas:\n raise PreventUpdate\n if 'adata' not in dbroot.adatas[an]:\n raise PreventUpdate\n\n nos, nof = dbroot.adatas[an]['adata'].shape\n\n return nos, nof, [\n html.Span(\"Rough estimates based on similar datasets\"),\n html.Br(), html.Br(),\n _get_run_times(nos, nof)\n ]\n\n\[email protected](\n Output(\"main-feature-list\", \"options\"),\n Output(\"side-feature-list\", \"options\"),\n Output(\"main-feature-list-spatial\", \"options\"),\n Output(\"side-feature-list-spatial\", \"options\"),\n MultiplexerOutput(\"push-notification\", \"data\"),\n\n Input(\"feature-list-signal\", \"data\"),\n Input(\"feature-list-signal-prep\", \"data\"),\n Input(\"feature-list-signal-prep-atac\", \"data\"),\n State(\"active-plot\", \"data\"),\n prevent_initial_call=True\n)\ndef update_feature_list(s1, s2, s3, actp):\n ctx = dash.callback_context\n if not ctx.triggered or (s1 is None and s2 is None):\n raise PreventUpdate\n an = 'a1' if actp == 1 else 'a2'\n if an not in dbroot.adatas:\n raise PreventUpdate\n if 'adata' not in dbroot.adatas[an]:\n raise PreventUpdate\n adata = dbroot.adatas[an]['adata']\n\n if adata.shape[1] > 40000:\n error_msg = \"Too many features found. Skipping feature list.\"\n logger.warn(error_msg)\n to_return = [dash.no_update] * 4\n to_return[actp - 1] = to_return[actp + 1] = []\n return to_return + [\n _prep_notification(error_msg, \"warning\")]\n\n try:\n if 'gene_symbols' not in adata.var:\n cl_add_gene_symbol(adata)\n except Exception as e:\n logger.error(str(e))\n error_msg = \"Error occurred when adding gene names.\"\n logger.error(error_msg)\n return [dash.no_update] * 4 + [_prep_notification(error_msg, \"danger\")]\n\n features = adata.var['gene_symbols'].to_numpy().astype(str)\n unique_index = adata.var_names.to_numpy().astype(str)\n to_return = [dash.no_update] * 4\n to_return[actp - 1] = to_return[actp + 1] = [\n {'label': f, 'value': g}\n for f, g in zip(features, unique_index)]\n\n return to_return + [dash.no_update]\n\n\ndef get_update_other_features_func(an, prefix):\n def _func(n1):\n ctx = dash.callback_context\n if not ctx.triggered:\n raise PreventUpdate\n if an not in dbroot.adatas:\n raise PreventUpdate\n if 'adata' not in dbroot.adatas[an]:\n raise PreventUpdate\n adata = dbroot.adatas[an]['adata']\n special_keys = ['proteins', 'genes']\n\n other_feats = np.array([]).astype('U200')\n values = np.array([]).astype('U200')\n\n for key in special_keys:\n if key in adata.uns:\n vals = np.array(adata.uns[key]).astype('U200').flatten()\n if len(vals) > 40_000:\n logger.warn(f\"Too many features in {key}. Skipping...\")\n other_feats = np.concatenate([other_feats, vals])\n vals = np.char.add(key + \":\", vals)\n values = np.concatenate([values, vals])\n\n if len(other_feats) == 0:\n return [], dash.no_update\n return [{'label': f, 'value': g}\n for f, g in zip(other_feats, values)], dash.no_update\n\n return _func\n\n\nfor prefix, an in zip(['main', 'side'], ['a1', 'a2']):\n app.callback(\n Output(prefix + \"-other-feature-list\", \"options\"),\n MultiplexerOutput(\"push-notification\", \"data\"),\n\n Input(prefix + \"-other-features-change\", \"data\"),\n prevent_initial_call=True\n )(get_update_other_features_func(an, prefix))\n" ]
[ [ "numpy.round", "numpy.array", "numpy.concatenate", "numpy.char.add" ] ]
jthorton/geomeTRIC
[ "1cee4490c367f4b6e92c661808659231b377c727" ]
[ "geometric/tests/test_openmm.py" ]
[ "\"\"\"\nA set of tests for using the OpenMM engine\n\"\"\"\n\nimport copy\nimport numpy as np\nimport json, os, shutil\nfrom . import addons\nimport geometric\nimport pytest\nimport itertools\n\nlocalizer = addons.in_folder\ndatad = addons.datad\n\[email protected]_openmm\ndef test_dlc_openmm_water3(localizer):\n \"\"\"\n Optimize the geometry of three water molecules using standard delocalized internal coordinates. \n The coordinate system will break down and have to be rebuilt.\n \"\"\"\n progress = geometric.optimize.run_optimizer(openmm=True, pdb=os.path.join(datad,'water3.pdb'), coordsys='dlc', input='tip3p.xml')\n # The results here are in Angstrom\n # \n ref = np.array([[ 1.19172917, -1.71174316, 0.79961878],\n [ 1.73335403, -2.33032763, 0.30714483],\n [ 1.52818406, -0.83992919, 0.51498083],\n [-0.31618326, 0.13417074, 2.15241103],\n [ 0.07716192, -0.68377281, 1.79137674],\n [-0.98942711, -0.18943265, 2.75288307],\n [ 1.64949098, 0.96407596, 0.43451244],\n [ 0.91641967, 0.91098247, 1.07801098],\n [ 1.78727054, 1.90697627, 0.33206132]])\n e_ref = -0.0289248308\n xdiff = (progress.xyzs[-1] - ref).flatten()\n rmsd, maxd = geometric.optimize.calc_drms_dmax(progress.xyzs[-1], ref, align=True)\n print(\"RMS / Max displacement from reference:\", rmsd, maxd)\n # This test is a bit stochastic and doesn't converge to the same minimized geometry every time.\n # Check that the energy is 0.01 a.u. above reference. Not really the qm_energy, this is a misnomer\n assert progress.qm_energies[-1] < (e_ref + 0.01)\n # Check that the optimization converged in less than 300 steps\n assert len(progress) < 300\n # Check that the geometry matches the reference to within 0.03 RMS 0.05 max displacement\n assert rmsd < 0.03\n assert maxd < 0.05\n\[email protected]_openmm\ndef test_tric_openmm_water6(localizer):\n \"\"\"\n Optimize the geometry of six water molecules using translation-rotation internal coordinates. 
\n This optimization should be rather stable.\n \"\"\"\n progress = geometric.optimize.run_optimizer(openmm=True, pdb=os.path.join(datad,'water6.pdb'), input='tip3p.xml')\n ref = np.array([[ 1.32539118, -1.69049000, 0.75057673],\n [ 1.99955139, -2.21940859, 1.18382539],\n [ 1.57912690, -0.77129578, 0.98807568],\n [-1.38379009, -0.92093253, 0.85872908],\n [-0.51149071, -1.29985588, 1.04548492],\n [-1.20200783, 0.02764829, 0.79004674],\n [ 1.73099256, 0.87457076, 1.40125992],\n [ 0.91505292, 1.24056850, 1.00215384],\n [ 2.16539319, 1.65091617, 1.75997418],\n [-0.24547074, 1.01680646, -2.34970965],\n [-0.12968321, 1.07011697, -3.30048872],\n [-0.19545660, 0.05162253, -2.17758026],\n [-0.17266482, -1.56348902, -1.60782169],\n [ 0.53007928, -1.76265282, -0.96619742],\n [-0.96767413, -1.61898974, -1.05756209],\n [-0.55553348, 1.72499233, 0.21519466],\n [-0.41554802, 1.52125570, -0.73793694],\n [-1.23114402, 2.40806579, 0.18635652]])\n \n e_ref = -0.0777452561\n xdiff = (progress.xyzs[-1] - ref).flatten()\n rmsd, maxd = geometric.optimize.calc_drms_dmax(progress.xyzs[-1], ref, align=True)\n print(\"RMS / Max displacement from reference:\", rmsd, maxd)\n assert progress.qm_energies[-1] < (e_ref + 0.01)\n # Check that the optimization converged in less than 100 steps\n assert len(progress) < 100\n # Check that the geometry matches the reference\n assert rmsd < 0.03\n assert maxd < 0.05\n\[email protected]_openmm\ndef test_openmm_ala_scan(localizer):\n # Requires amber99sb.xml which ships with OpenMM\n m = geometric.optimize.run_optimizer(openmm=True, enforce=0.1, pdb=os.path.join(datad, 'ala_a99sb_min.pdb'), input='amber99sb.xml',\n constraints=os.path.join(datad, 'ala_constraints.txt'))\n scan_final = geometric.molecule.Molecule('scan-final.xyz')\n scan_energies = np.array([float(c.split()[-1]) for c in scan_final.comms])\n ref_energies = np.array([-0.03368698, -0.03349261])\n # Check converged energies\n assert np.allclose(scan_energies, ref_energies, atol=1.e-3)\n # Check for performance regression (should be done in ~33 cycles)\n assert len(m) < 50\n\[email protected]_openmm\ndef test_openmm_ala_scan_conmethod(localizer):\n # Requires amber99sb.xml which ships with OpenMM\n m = geometric.optimize.run_optimizer(openmm=True, conmethod=1, pdb=os.path.join(datad, 'ala_a99sb_min.pdb'), input='amber99sb.xml',\n constraints=os.path.join(datad, 'ala_constraints.txt'))\n scan_final = geometric.molecule.Molecule('scan-final.xyz')\n scan_energies = np.array([float(c.split()[-1]) for c in scan_final.comms])\n ref_energies = np.array([-0.03368698, -0.03349261])\n # Check converged energies\n assert np.allclose(scan_energies, ref_energies, atol=1.e-3)\n # Check for performance regression (should be done in ~62 cycles)\n assert len(m) < 100\n\[email protected]_openmm\ndef test_openmm_h2o2_h2o_grad_hess(localizer):\n M, engine = geometric.optimize.get_molecule_engine(openmm=True, pdb=os.path.join(datad, 'h2o2_h2o.pdb'), input=os.path.join(datad, 'h2o2_h2o_system.xml'))\n coords = M.xyzs[0].flatten() * geometric.nifty.ang2bohr\n IC = geometric.internal.DelocalizedInternalCoordinates(M, build=True, connect=False, addcart=False)\n Gq_ana, Gq_num = geometric.optimize.CheckInternalGrad(coords, M, IC.Prims, engine, 'h2o2_h2o.tmp', False)\n Hq_ana, Hq_num = geometric.optimize.CheckInternalHess(coords, M, IC.Prims, engine, 'h2o2_h2o.tmp', False)\n assert np.allclose(Gq_ana, Gq_num, atol=1e-5)\n assert np.allclose(Hq_ana, Hq_num, atol=1e-5)\n # constraints=os.path.join(datad, 'ala_constraints.txt')\n # m = 
geometric.optimize.run_optimizer(openmm=True, enforce=0.1, pdb=os.path.join(datad, 'ala_a99sb_min.pdb'), input='amber99sb.xml',\n # constraints=os.path.join(datad, 'ala_constraints.txt'))\n # scan_final = geometric.molecule.Molecule('scan-final.xyz')\n # scan_energies = np.array([float(c.split()[-1]) for c in scan_final.comms])\n # ref_energies = np.array([-0.03368698, -0.03349261])\n # # Check converged energies\n # assert np.allclose(scan_energies, ref_energies, atol=1.e-3)\n # # Check for performance regression (should be done in ~33 cycles)\n # assert len(m) < 50\n" ]
[ [ "numpy.array", "numpy.allclose" ] ]
dishen12/whale
[ "2d5bc3aaccab87ecebe31663d2d6c99d52563cc8" ]
[ "data_aug.py" ]
[ "from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport numpy as np\nimport os\n\ndatagen = ImageDataGenerator(\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=False,\n vertical_flip = False,\n fill_mode='nearest')\n\ndef data_aug(image_dir):\n train_csv = os.path.join(image_dir,\"train.csv\")\n f = open(train_csv,\"r\")\n lines = f.readlines()\n np.random.shuffle(lines)\n images = []\n labels = []\n labels_all = []\n label_id = []\n occlude_class = [\"new_whale\"]\n save_dir = imageName = os.path.join(image_dir,\"train_aug_2\")\n for i,line in enumerate(lines):\n #if(i>2):continue\n if(i==0): continue\n temp = line.strip().split(\",\")\n image_name = temp[0]\n imageName = os.path.join(image_dir,\"train\",image_name)\n if(not os.path.exists(imageName)): continue\n print(i,imageName)\n label=temp[1]\n if(label in occlude_class): continue\n img = load_img(imageName) \n x = img_to_array(img) \n x = x.reshape((1,) + x.shape) \n i = 0\n for batch in datagen.flow(x, batch_size=1,\n save_to_dir=save_dir, save_prefix=image_name+\"_\"+label, save_format='jpg'):\n i += 1\n if i > 12:\n break \n print(\"end\")\n \ndata_aug(\"/nfs/project/whale/\")" ]
[ [ "numpy.random.shuffle" ] ]
etpr/sequential-manifold-planning
[ "a5f05d04c9ab954f39f0e8eecb3e90781564ffaa" ]
[ "psm/algorithms/psm.py" ]
[ "import numpy as np\nfrom scipy.linalg import null_space\nfrom math import ceil\nimport tqdm\nfrom psm.tasks import Task\nfrom psm.tree import Tree\nfrom psm.util import unit_ball_measure, is_on_manifold, get_volume, Projection\nfrom psm.manifolds import ManifoldStack, Manifold\n\n\nclass PSM:\n def __init__(self,\n task: Task,\n cfg: dict):\n self.name = 'PSM'\n self.n_manifolds = len(task.manifolds)\n self.task = task\n self.start = task.start\n self.n_samples = cfg['N']\n self.alpha = cfg['ALPHA']\n self.beta = cfg['BETA']\n self.eps = cfg['EPS']\n self.rho = cfg['RHO']\n self.r_max = cfg['R_MAX']\n self.collision_res = cfg['COLLISION_RES']\n self.d = task.d\n self.proj_step_size = cfg['PROJ_STEP_SIZE']\n\n self.lim_lo = task.lim_lo\n self.lim_up = task.lim_up\n self.gamma = np.power(2 * (1 + 1.0 / float(self.d)), 1.0 / float(self.d)) * \\\n np.power(get_volume(self.lim_lo, self.lim_up) / unit_ball_measure(self.d), 1. / float(self.d))\n\n self.Q_near_ids = []\n self.G = None\n self.G_list = []\n self.V_goal = []\n self.V_goal_list = []\n self.path = None\n self.path_id = None\n\n # check if start point is on first manifold\n if not is_on_manifold(task.manifolds[0], task.start, self.eps):\n raise Exception('The start point is not on the manifold h(start)= ' + str(task.manifolds[0].y(task.start)))\n\n # check if start point is in configuration space limits\n if not self.task.is_valid_conf(task.start):\n raise Exception('The start point is not in the system limits.')\n\n # check if start point is in collision\n if self.task.is_collision_conf(task.start):\n raise Exception('The start point is in collision.')\n\n def run(self) -> bool:\n # iterate over sequence of manifolds\n for n in range(self.n_manifolds - 1):\n print('######################################################')\n print('n', n)\n print('Active Manifold: ', self.task.manifolds[n].name)\n print('Target Manifold: ', self.task.manifolds[n+1].name)\n\n self.G = Tree(self.d, exact_nn=False)\n if n == 0:\n # init tree with start node\n self.G.add_node(node_id=0, node_value=self.start, node_cost=0.0, inc_cost=0.0)\n else:\n # init tree with transition nodes\n self.G.abstract_root = True\n self.G.add_node(node_id=0, node_value=np.ones(self.d) * np.inf, node_cost=0.0, inc_cost=0.0) # virtual root node\n for idx, v_id in enumerate(self.V_goal):\n node_id = self.G_list[-1].V[v_id]\n self.G.add_node(node_id=idx+1, node_value=node_id.value, node_cost=node_id.cost, inc_cost=node_id.cost)\n self.G.add_edge(edge_id=self.G.edge_count, node_a=0, node_b=idx+1)\n\n self.V_goal.clear()\n\n # check if an initial configuration fulfills the next constraint\n for v in self.G.V.values():\n if not np.isinf(v.value).any() and is_on_manifold(self.task.manifolds[n + 1], v.value):\n self.V_goal.append(v.id)\n\n if len(self.V_goal) == 0:\n self.grow_tree(curr_manifold=self.task.manifolds[n], next_manifold=self.task.manifolds[n + 1])\n\n print('number of nodes in tree_' + str(n) + ' = ' + str(len(self.G.V)))\n if len(self.V_goal) == 0:\n print('RRT extension did not reach any intersection nodes')\n return False\n else:\n print('number of goal nodes in tree_' + str(n) + ' = ' + str(len(self.V_goal)))\n\n # store results for later evaluation\n self.G_list.append(self.G)\n self.V_goal_list.append(self.V_goal.copy())\n\n # compute optimal path\n opt_idx = min(self.V_goal, key=lambda idx: np.linalg.norm(self.G.V[idx].cost))\n path_idx = self.G.comp_path(opt_idx)\n opt_path = [[self.G.V[idx].value for idx in path_idx]]\n opt_path_idx = [path_idx.copy()]\n 
opt_path_cost = self.G.V[opt_idx].cost\n\n for G, V_goal in zip(reversed(self.G_list[:-1]), reversed(self.V_goal_list[:-1])):\n opt_idx = V_goal[path_idx[0] - 1] # -1 offset due to virtual root node\n path_idx = G.comp_path(opt_idx)\n opt_path_idx.append(path_idx.copy())\n opt_path.append([G.V[idx].value for idx in path_idx])\n\n opt_path = list(reversed(opt_path))\n opt_path_idx = list(reversed(opt_path_idx))\n\n self.path = opt_path\n self.path_idx = opt_path_idx\n return True\n\n def steer(self,\n q_from: np.ndarray,\n q_from_id: int,\n q_to: np.ndarray,\n curr_manifold: Manifold,\n next_manifold: Manifold) -> np.ndarray:\n if np.random.rand() < self.beta and not self.G.V[q_from_id].con_extend:\n # steer towards next_manifolds\n self.G.V[q_from_id].con_extend = True\n yn = next_manifold.y(q_from)\n Jn = next_manifold.J(q_from)\n\n d = -Jn.T @ yn\n # project on current manifold\n J = null_space(curr_manifold.J(q_from))\n if J.shape[1] != 0:\n d = J @ J.T @ d\n\n else:\n # steer towards q_to\n d = (q_to - q_from)\n\n # project on current manifold\n J = null_space(curr_manifold.J(q_from))\n if J.shape[1] != 0:\n d = J @ J.T @ d\n\n if np.linalg.norm(d) > 0.0:\n q_new = q_from + self.alpha * d * (1.0 / np.linalg.norm(d))\n return q_new\n else:\n return None\n\n def is_collision(self,\n q_a: np.ndarray,\n q_b: np.ndarray) -> bool:\n N = int(ceil(np.linalg.norm(q_b - q_a) / self.collision_res))\n for i in range(N + 1):\n q = q_a if N == 0 else q_a + i / float(N) * (q_b - q_a)\n res = self.task.is_collision_conf(q)\n if res:\n return True\n return False\n\n def extend(self,\n q_from: np.ndarray,\n q_from_id: int,\n q_to: np.ndarray,\n q_to_id: int) -> bool:\n if self.is_collision(q_from, q_to):\n return False\n\n n = float(len(self.G.V))\n r = min([self.gamma * np.power(np.log(n) / n, 1.0 / self.d), self.alpha])\n\n self.Q_near_ids = self.G.get_nearest_neighbors(node_value=q_to, radius=r)\n\n c_min = self.G.V[q_from_id].cost + np.linalg.norm(q_from - q_to)\n c_min_inc = np.linalg.norm(q_from - q_to)\n q_min_idx = q_from_id\n for idx in self.Q_near_ids:\n q_idx = self.G.V[idx].value\n c_idx = self.G.V[idx].cost + np.linalg.norm(q_idx - q_to)\n\n if not self.is_collision(q_idx, q_to) and c_idx < c_min:\n c_min = c_idx\n c_min_inc = np.linalg.norm(q_idx - q_to)\n q_min_idx = idx\n\n self.G.add_node(node_id=q_to_id, node_value=q_to, node_cost=c_min, inc_cost=c_min_inc)\n self.G.add_edge(edge_id=self.G.edge_count, node_a=q_min_idx, node_b=q_to_id)\n\n return True\n\n def rewire(self,\n q_from: np.ndarray,\n q_from_id: int):\n for idx in self.Q_near_ids: # Q_near_ids was previously computed in extend function\n q_idx = self.G.V[idx].value\n c_idx = self.G.V[idx].cost\n c_new = self.G.V[q_from_id].cost + np.linalg.norm(q_from - q_idx)\n\n if not self.is_collision(q_from, q_idx) and c_new < c_idx:\n idx_parent = self.G.V[idx].parent\n self.G.remove_edge(idx_parent, idx)\n self.G.add_edge(edge_id=self.G.edge_count, node_a=q_from_id, node_b=idx)\n self.G.V[idx].cost = c_new\n self.G.V[idx].parent = q_from_id\n self.G.V[idx].inc_cost = np.linalg.norm(q_from - q_idx)\n self.G.update_child_costs(node_id=idx)\n\n def grow_tree(self,\n curr_manifold: Manifold,\n next_manifold: Manifold):\n curr_projector = Projection(f=curr_manifold.y, J=curr_manifold.J, step_size=self.proj_step_size)\n joint_manifold = ManifoldStack([curr_manifold, next_manifold])\n joint_projector = Projection(f=joint_manifold.y, J=joint_manifold.J, step_size=self.proj_step_size)\n\n pbar = tqdm.tqdm(total=self.n_samples)\n for i in 
range(self.n_samples):\n pbar.update()\n q_target = self.task.sample()\n q_near, q_near_id = self.G.get_nearest_neighbor(node_value=q_target)\n\n q_new = self.steer(q_near, q_near_id, q_target, curr_manifold, next_manifold)\n\n if q_new is None:\n continue\n\n # project q_new on current or next manifold\n on_next_manifold = False\n if np.linalg.norm(next_manifold.y(q_new)) < np.random.rand() * self.r_max:\n res, q_new_proj = joint_projector.project(q_new)\n else:\n res, q_new_proj = curr_projector.project(q_new)\n\n if not res:\n continue # continue if projection was not successful\n\n # check if q_new_proj is on the next manifold\n if is_on_manifold(next_manifold, q_new_proj, self.eps):\n if len(self.V_goal) == 0:\n on_next_manifold = True\n else:\n q_proj_near = min(self.V_goal, key=lambda idx: np.linalg.norm(self.G.V[idx].value - q_new_proj))\n if np.linalg.norm(self.G.V[q_proj_near].value - q_new_proj) > self.rho:\n on_next_manifold = True\n else:\n continue # continue if a node close to q_new_proj is already in the tree\n\n q_new_idx = self.G.node_count\n extended = self.extend(q_from=q_near, q_from_id=q_near_id, q_to=q_new_proj, q_to_id=q_new_idx)\n\n if extended:\n self.rewire(q_from=q_new_proj, q_from_id=q_new_idx)\n\n # add to V_goal if q_new is on the next manifold\n if on_next_manifold:\n self.V_goal.append(q_new_idx)\n\n pbar.close()\n print('')\n" ]
[ [ "numpy.log", "numpy.linalg.norm", "numpy.ones", "numpy.random.rand", "numpy.isinf" ] ]
GrayXu/SpectralNormalization-TF-Keras
[ "ed2c23ac5b6a98eb0f5eec9cec51dfe3f9128793" ]
[ "SNorm_tf_keras.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.engine import *\n# from tensorflow.keras.legacy import interfaces\nfrom tensorflow.keras import activations\nfrom tensorflow.keras import initializers\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras import constraints\n# from tensorflow.keras.utils.generic_utils import func_dump\n# from tensorflow.keras.utils.generic_utils import func_load\n# from tensorflow.keras.utils.generic_utils import deserialize_keras_object\n# from tensorflow.keras.utils.generic_utils import has_arg\n# from tensorflow.keras.utils import conv_utils\n\n# from tensorflow.keras.legacy import interfaces\nfrom tensorflow.keras.layers import Dense, Conv1D, Conv2D, Conv3D, Conv2DTranspose, Embedding\n\nimport itertools\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\n\nimport tensorflow as tf\n\nclass DenseSN(Dense):\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n \n def call(self, inputs, training=None):\n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n W_shape = self.kernel.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape) \n output = K.dot(inputs, W_bar)\n if self.use_bias:\n output = K.bias_add(output, self.bias, data_format='channels_last')\n if self.activation is not None:\n output = self.activation(output)\n return output \n \nclass _ConvSN(Layer):\n\n def __init__(self, rank,\n filters,\n kernel_size,\n strides=1,\n padding='valid',\n data_format=None,\n dilation_rate=1,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n spectral_normalization=True,\n **kwargs):\n super(_ConvSN, self).__init__(**kwargs)\n self.rank = rank\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = 
normalize_tuple(dilation_rate, rank, 'dilation_rate')\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=self.rank + 2)\n self.spectral_normalization = spectral_normalization\n self.u = None\n \n def _l2normalize(self, v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n \n def power_iteration(self, u, W):\n '''\n According to the paper, we only need to do one power iteration.\n '''\n v = self._l2normalize(K.dot(u, K.transpose(W)))\n u = self._l2normalize(K.dot(v, W))\n return u, v\n def build(self, input_shape):\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n\n #Spectral Normalization\n if self.spectral_normalization:\n self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n \n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs, training=None):\n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n \n if self.spectral_normalization:\n W_shape = self.kernel.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n\n #update weight\n self.kernel = W_bar\n \n if self.rank == 1:\n outputs = K.conv1d(\n inputs,\n self.kernel,\n strides=self.strides[0],\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate[0])\n if self.rank == 2:\n outputs = K.conv2d(\n inputs,\n self.kernel,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if self.rank == 3:\n outputs = K.conv3d(\n inputs,\n self.kernel,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n 
self.bias,\n data_format=self.data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_last':\n space = input_shape[1:-1]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n return (input_shape[0],) + tuple(new_space) + (self.filters,)\n if self.data_format == 'channels_first':\n space = input_shape[2:]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i])\n new_space.append(new_dim)\n return (input_shape[0], self.filters) + tuple(new_space)\n\n def get_config(self):\n config = {\n 'rank': self.rank,\n 'filters': self.filters,\n 'kernel_size': self.kernel_size,\n 'strides': self.strides,\n 'padding': self.padding,\n 'data_format': self.data_format,\n 'dilation_rate': self.dilation_rate,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(_ConvSN, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n \nclass ConvSN2D(Conv2D):\n\n def build(self, input_shape):\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n \n self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n \n # Set input spec.\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n self.built = True\n def call(self, inputs, training=None):\n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n #Accroding the paper, we only need to do power iteration one time.\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n #Spectral Normalization\n W_shape = self.kernel.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n \n outputs = K.conv2d(\n inputs,\n W_bar,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n \nclass ConvSN1D(Conv1D):\n \n def build(self, input_shape):\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n \n self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n # Set input spec.\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n self.built = True\n \n def call(self, inputs, training=None):\n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n #Accroding the paper, we only need to do power iteration one time.\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n #Spectral Normalization\n W_shape = self.kernel.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n \n outputs = K.conv1d(\n inputs,\n W_bar,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\nclass ConvSN3D(Conv3D): \n def build(self, input_shape):\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (input_dim, self.filters)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n \n self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n \n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n # Set input spec.\n self.input_spec = InputSpec(ndim=self.rank + 2,\n axes={channel_axis: input_dim})\n self.built = True\n\n def call(self, inputs, training=None):\n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n #According to the paper, we only need to do one power iteration.\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n #Spectral Normalization\n W_shape = self.kernel.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n \n outputs = K.conv3d(\n inputs,\n W_bar,\n strides=self.strides,\n padding=self.padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n \nclass EmbeddingSN(Embedding):\n \n def build(self, input_shape):\n self.embeddings = self.add_weight(\n shape=(self.input_dim, self.output_dim),\n initializer=self.embeddings_initializer,\n name='embeddings',\n regularizer=self.embeddings_regularizer,\n constraint=self.embeddings_constraint,\n dtype=self.dtype)\n \n self.u = self.add_weight(shape=tuple([1, self.embeddings.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n \n self.built = True\n \n def call(self, inputs, training=None):\n if K.dtype(inputs) != 'int32':\n inputs = K.cast(inputs, 'int32')\n \n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n #According to the paper, we only need to do one power iteration.\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n W_shape = self.embeddings.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.embeddings, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n self.embeddings = W_bar\n \n out = K.gather(self.embeddings, inputs)\n return out \n\nclass ConvSN2DTranspose(Conv2DTranspose):\n\n def build(self, input_shape):\n if 
len(input_shape) != 4:\n raise ValueError('Inputs should have rank ' +\n str(4) +\n '; Received input shape:', str(input_shape))\n if self.data_format == 'channels_first':\n channel_axis = 1\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. Found `None`.')\n input_dim = input_shape[channel_axis]\n kernel_shape = self.kernel_size + (self.filters, input_dim)\n\n self.kernel = self.add_weight(shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.filters,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n \n self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),\n initializer=initializers.RandomNormal(0, 1),\n name='sn',\n trainable=False)\n \n # Set input spec.\n self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})\n self.built = True \n \n def call(self, inputs, training=None):\n input_shape = K.shape(inputs)\n batch_size = input_shape[0]\n if self.data_format == 'channels_first':\n h_axis, w_axis = 2, 3\n else:\n h_axis, w_axis = 1, 2\n\n height, width = input_shape[h_axis], input_shape[w_axis]\n kernel_h, kernel_w = self.kernel_size\n stride_h, stride_w = self.strides\n if self.output_padding is None:\n out_pad_h = out_pad_w = None\n else:\n out_pad_h, out_pad_w = self.output_padding\n\n # Infer the dynamic output shape (deconv_output_length is defined below)\n out_height = deconv_output_length(height,\n kernel_h,\n self.padding,\n output_padding=out_pad_h,\n stride=stride_h)\n out_width = deconv_output_length(width,\n kernel_w,\n self.padding,\n output_padding=out_pad_w,\n stride=stride_w)\n if self.data_format == 'channels_first':\n output_shape = (batch_size, self.filters, out_height, out_width)\n else:\n output_shape = (batch_size, out_height, out_width, self.filters)\n \n #Spectral Normalization \n def _l2normalize(v, eps=1e-12):\n return v / (K.sum(v ** 2) ** 0.5 + eps)\n def power_iteration(W, u):\n #According to the paper, we only need to do one power iteration.\n _u = u\n _v = _l2normalize(K.dot(_u, K.transpose(W)))\n _u = _l2normalize(K.dot(_v, W))\n return _u, _v\n W_shape = self.kernel.shape.as_list()\n #Flatten the Tensor\n W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])\n _u, _v = power_iteration(W_reshaped, self.u)\n #Calculate Sigma\n sigma=K.dot(_v, W_reshaped)\n sigma=K.dot(sigma, K.transpose(_u))\n #normalize it\n W_bar = W_reshaped / sigma\n #reshape weight tensor\n if training in {0, False}:\n W_bar = K.reshape(W_bar, W_shape)\n else:\n with tf.control_dependencies([self.u.assign(_u)]):\n W_bar = K.reshape(W_bar, W_shape)\n self.kernel = W_bar\n \n outputs = K.conv2d_transpose(\n inputs,\n self.kernel,\n output_shape,\n self.strides,\n padding=self.padding,\n data_format=self.data_format)\n\n if self.use_bias:\n outputs = K.bias_add(\n outputs,\n self.bias,\n data_format=self.data_format)\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n\n# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities used by convolution layers.\n\"\"\"\n\ndef convert_data_format(data_format, ndim):\n if data_format == 'channels_last':\n if ndim == 3:\n return 'NWC'\n elif ndim == 4:\n return 'NHWC'\n elif ndim == 5:\n return 'NDHWC'\n else:\n raise ValueError('Input rank not supported:', ndim)\n elif data_format == 'channels_first':\n if ndim == 3:\n return 'NCW'\n elif ndim == 4:\n return 'NCHW'\n elif ndim == 5:\n return 'NCDHW'\n else:\n raise ValueError('Input rank not supported:', ndim)\n else:\n raise ValueError('Invalid data_format:', data_format)\n\n\ndef normalize_tuple(value, n, name):\n \"\"\"Transforms a single integer or iterable of integers into an integer tuple.\n\n Arguments:\n value: The value to validate and convert. Could an int, or any iterable\n of ints.\n n: The size of the tuple to be returned.\n name: The name of the argument being validated, e.g. \"strides\" or\n \"kernel_size\". This is only used to format error messages.\n\n Returns:\n A tuple of n integers.\n\n Raises:\n ValueError: If something else than an int/long or iterable thereof was\n passed.\n \"\"\"\n if isinstance(value, int):\n return (value,) * n\n else:\n try:\n value_tuple = tuple(value)\n except TypeError:\n raise ValueError('The `' + name + '` argument must be a tuple of ' +\n str(n) + ' integers. Received: ' + str(value))\n if len(value_tuple) != n:\n raise ValueError('The `' + name + '` argument must be a tuple of ' +\n str(n) + ' integers. Received: ' + str(value))\n for single_value in value_tuple:\n try:\n int(single_value)\n except (ValueError, TypeError):\n raise ValueError('The `' + name + '` argument must be a tuple of ' +\n str(n) + ' integers. 
Received: ' + str(value) + ' '\n 'including element ' + str(single_value) + ' of type' +\n ' ' + str(type(single_value)))\n return value_tuple\n\n\ndef conv_output_length(input_length, filter_size, padding, stride, dilation=1):\n \"\"\"Determines output length of a convolution given input length.\n\n Arguments:\n input_length: integer.\n filter_size: integer.\n padding: one of \"same\", \"valid\", \"full\", \"causal\"\n stride: integer.\n dilation: dilation rate, integer.\n\n Returns:\n The output length (integer).\n \"\"\"\n if input_length is None:\n return None\n assert padding in {'same', 'valid', 'full', 'causal'}\n dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n if padding in ['same', 'causal']:\n output_length = input_length\n elif padding == 'valid':\n output_length = input_length - dilated_filter_size + 1\n elif padding == 'full':\n output_length = input_length + dilated_filter_size - 1\n return (output_length + stride - 1) // stride\n\n\ndef conv_input_length(output_length, filter_size, padding, stride):\n \"\"\"Determines input length of a convolution given output length.\n\n Arguments:\n output_length: integer.\n filter_size: integer.\n padding: one of \"same\", \"valid\", \"full\".\n stride: integer.\n\n Returns:\n The input length (integer).\n \"\"\"\n if output_length is None:\n return None\n assert padding in {'same', 'valid', 'full'}\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n return (output_length - 1) * stride - 2 * pad + filter_size\n\n\ndef deconv_output_length(input_length, filter_size, padding,\n output_padding=None, stride=0, dilation=1):\n \"\"\"Determines output length of a transposed convolution given input length.\n\n Arguments:\n input_length: Integer.\n filter_size: Integer.\n padding: one of `\"same\"`, `\"valid\"`, `\"full\"`.\n output_padding: Integer, amount of padding along the output dimension.\n Can be set to `None` in which case the output length is inferred.\n stride: Integer.\n dilation: Integer.\n\n Returns:\n The output length (integer).\n \"\"\"\n assert padding in {'same', 'valid', 'full'}\n if input_length is None:\n return None\n\n # Get the dilated kernel size\n filter_size = filter_size + (filter_size - 1) * (dilation - 1)\n\n # Infer length if output padding is None, else compute the exact length\n if output_padding is None:\n if padding == 'valid':\n length = input_length * stride + max(filter_size - stride, 0)\n elif padding == 'full':\n length = input_length * stride - (stride + filter_size - 2)\n elif padding == 'same':\n length = input_length * stride\n\n else:\n if padding == 'same':\n pad = filter_size // 2\n elif padding == 'valid':\n pad = 0\n elif padding == 'full':\n pad = filter_size - 1\n\n length = ((input_length - 1) * stride + filter_size - 2 * pad +\n output_padding)\n return length\n\n\ndef normalize_data_format(value):\n if value is None:\n value = K.image_data_format()\n data_format = value.lower()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('The `data_format` argument must be one of '\n '\"channels_first\", \"channels_last\". Received: ' +\n str(value))\n return data_format\n\n\ndef normalize_padding(value):\n if isinstance(value, (list, tuple)):\n return value\n padding = value.lower()\n if padding not in {'valid', 'same', 'causal'}:\n raise ValueError('The `padding` argument must be a list/tuple or one of '\n '\"valid\", \"same\" (or \"causal\", only for `Conv1D). 
'\n 'Received: ' + str(padding))\n return padding\n\n\ndef convert_kernel(kernel):\n \"\"\"Converts a Numpy kernel matrix from Theano format to TensorFlow format.\n\n Also works reciprocally, since the transformation is its own inverse.\n\n Arguments:\n kernel: Numpy array (3D, 4D or 5D).\n\n Returns:\n The converted kernel.\n\n Raises:\n ValueError: in case of invalid kernel shape or invalid data_format.\n \"\"\"\n kernel = np.asarray(kernel)\n if not 3 <= kernel.ndim <= 5:\n raise ValueError('Invalid kernel shape:', kernel.shape)\n slices = [slice(None, None, -1) for _ in range(kernel.ndim)]\n no_flip = (slice(None, None), slice(None, None))\n slices[-2:] = no_flip\n return np.copy(kernel[slices])\n\n\ndef conv_kernel_mask(input_shape, kernel_shape, strides, padding):\n \"\"\"Compute a mask representing the connectivity of a convolution operation.\n\n Assume a convolution with given parameters is applied to an input having N\n spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an\n output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array\n of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries\n indicating pairs of input and output locations that are connected by a weight.\n\n Example:\n ```python\n >>> input_shape = (4,)\n >>> kernel_shape = (2,)\n >>> strides = (1,)\n >>> padding = \"valid\"\n >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)\n array([[ True, False, False],\n [ True, True, False],\n [False, True, True],\n [False, False, True]], dtype=bool)\n ```\n where rows and columns correspond to inputs and outputs respectively.\n\n\n Args:\n input_shape: tuple of size N: `(d_in1, ..., d_inN)`,\n spatial shape of the input.\n kernel_shape: tuple of size N, spatial shape of the convolutional kernel\n / receptive field.\n strides: tuple of size N, strides along each spatial dimension.\n padding: type of padding, string `\"same\"` or `\"valid\"`.\n\n Returns:\n A boolean 2N-D `np.ndarray` of shape\n `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)`\n is the spatial shape of the output. `True` entries in the mask represent\n pairs of input-output locations that are connected by a weight.\n\n Raises:\n ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the\n same number of dimensions.\n NotImplementedError: if `padding` is not in {`\"same\"`, `\"valid\"`}.\n \"\"\"\n if padding not in {'same', 'valid'}:\n raise NotImplementedError('Padding type %s not supported. '\n 'Only \"valid\" and \"same\" '\n 'are implemented.' % padding)\n\n in_dims = len(input_shape)\n if isinstance(kernel_shape, int):\n kernel_shape = (kernel_shape,) * in_dims\n if isinstance(strides, int):\n strides = (strides,) * in_dims\n\n kernel_dims = len(kernel_shape)\n stride_dims = len(strides)\n if kernel_dims != in_dims or stride_dims != in_dims:\n raise ValueError('Number of strides, input and kernel dimensions must all '\n 'match. Received: %d, %d, %d.' 
%\n (stride_dims, in_dims, kernel_dims))\n\n output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)\n\n mask_shape = input_shape + output_shape\n mask = np.zeros(mask_shape, np.bool)\n\n output_axes_ticks = [range(dim) for dim in output_shape]\n for output_position in itertools.product(*output_axes_ticks):\n input_axes_ticks = conv_connected_inputs(input_shape,\n kernel_shape,\n output_position,\n strides,\n padding)\n for input_position in itertools.product(*input_axes_ticks):\n mask[input_position + output_position] = True\n\n return mask\n\n\ndef conv_connected_inputs(input_shape,\n kernel_shape,\n output_position,\n strides,\n padding):\n \"\"\"Return locations of the input connected to an output position.\n\n Assume a convolution with given parameters is applied to an input having N\n spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method\n returns N ranges specifying the input region that was convolved with the\n kernel to produce the output at position\n `output_position = (p_out1, ..., p_outN)`.\n\n Example:\n ```python\n >>> input_shape = (4, 4)\n >>> kernel_shape = (2, 1)\n >>> output_position = (1, 1)\n >>> strides = (1, 1)\n >>> padding = \"valid\"\n >>> conv_connected_inputs(input_shape, kernel_shape, output_position,\n >>> strides, padding)\n [xrange(1, 3), xrange(1, 2)]\n ```\n Args:\n input_shape: tuple of size N: `(d_in1, ..., d_inN)`,\n spatial shape of the input.\n kernel_shape: tuple of size N, spatial shape of the convolutional kernel\n / receptive field.\n output_position: tuple of size N: `(p_out1, ..., p_outN)`,\n a single position in the output of the convolution.\n strides: tuple of size N, strides along each spatial dimension.\n padding: type of padding, string `\"same\"` or `\"valid\"`.\n\n Returns:\n N ranges `[[p_in_left1, ..., p_in_right1], ...,\n [p_in_leftN, ..., p_in_rightN]]` specifying the region in the\n input connected to output_position.\n \"\"\"\n ranges = []\n\n ndims = len(input_shape)\n for d in range(ndims):\n left_shift = int(kernel_shape[d] / 2)\n right_shift = kernel_shape[d] - left_shift\n\n center = output_position[d] * strides[d]\n\n if padding == 'valid':\n center += left_shift\n\n start = max(0, center - left_shift)\n end = min(input_shape[d], center + right_shift)\n\n ranges.append(range(start, end))\n\n return ranges\n\n\ndef conv_output_shape(input_shape, kernel_shape, strides, padding):\n \"\"\"Return the output shape of an N-D convolution.\n\n Forces dimensions where input is empty (size 0) to remain empty.\n\n Args:\n input_shape: tuple of size N: `(d_in1, ..., d_inN)`,\n spatial shape of the input.\n kernel_shape: tuple of size N, spatial shape of the convolutional kernel\n / receptive field.\n strides: tuple of size N, strides along each spatial dimension.\n padding: type of padding, string `\"same\"` or `\"valid\"`.\n\n Returns:\n tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.\n \"\"\"\n dims = range(len(kernel_shape))\n output_shape = [conv_output_length(input_shape[d],\n kernel_shape[d],\n padding,\n strides[d])\n for d in dims]\n output_shape = tuple([0 if input_shape[d] == 0 else output_shape[d]\n for d in dims])\n return output_shape\n" ]
[ [ "numpy.asarray", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.backend.dtype", "tensorflow.keras.backend.gather", "tensorflow.keras.backend.reshape", "numpy.copy", "numpy.zeros", "tensorflow.keras.initializers.get", "tensorflow.keras.backend.conv2d_transpose", "tensorflow.keras.backend.conv3d", "tensorflow.keras.backend.bias_add", "tensorflow.keras.backend.image_data_format", "tensorflow.keras.backend.dot", "tensorflow.keras.backend.sum", "tensorflow.keras.initializers.serialize", "tensorflow.keras.backend.conv2d", "tensorflow.keras.constraints.get", "tensorflow.keras.activations.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.backend.cast", "tensorflow.keras.backend.transpose", "tensorflow.keras.backend.conv1d", "tensorflow.keras.backend.shape", "tensorflow.keras.activations.get", "tensorflow.keras.initializers.RandomNormal" ] ]
satvikmashkaria/CS748_project
[ "57185b6a467e638d6db96cf1c7d3dbe8d6bf5032" ]
[ "cswor/runner.py" ]
[ "from mod_copeland import sample_complexity\n\n\nargs = {}\n# args['heuristic'] = 'random'\nargs['heuristic'] = 'greedy'\nargs['n_voters'] = 1000\nargs['alpha'] = 0.05\n# args['probs'] = [0.4, 0.45, 0.5, 0.7, 0.8]\n# args['probs'] = [0.1, 0.2, 0.3, 0.4, 0.5]\nargs['probs'] = [0.1 + 0.05*i for i in range(10)]\nprint(\"ppp\", args['probs'])\nargs['seed'] = 42\nargs['ques_limit'] = 5\ngammas = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]\n# gammas = [0.0]\ngreedy_itrs = []\nrandom_itrs = []\nseeds = [2, 3, 4]\n# seeds = [0]\n\n# for seed in seeds:\n# args['seed'] = seed\n# itr, winner = sample_complexity(args)\n# print(\"seed\", seed, \"itr\", itr, \"winner\", winner)\n# random_itrs.append(itr)\n#\n# print(random_itrs)\n# print(sum(random_itrs)/6)\n\n# for seed in seeds:\n# seed_vals = []\n# args['seed'] = seed\n# for gamma in gammas:\n# print(\"Gamma\", gamma, \"started\")\n# args['gamma'] = gamma\n# itr, winner = sample_complexity(args)\n# print(\"seed\", seed, \"itr\", itr, \"winner\", winner)\n# seed_vals.append(itr)\n# greedy_itrs.append(seed_vals)\n# #\n# print(greedy_itrs)\n# print(sample_complexity(args))\n\n\ngreedy_itrs = [[257, 307, 377, 424, 297, 453], [252, 303, 377, 424, 297, 453], [251, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453]]\n\nprint([sum(i)/6 for i in greedy_itrs])\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_theme()\n\n# plt.plot(gammas, [sum(i)/6 for i in greedy_itrs])\n# plt.xlabel(\"Gamma\")\n# plt.ylabel(\"Average sample complexity\")\n# plt.title(\"Variation with Gamma\")\n# plt.show()\n\nres = [[649, 496, 496, 496, 496, 496], [565, 496, 496, 496, 496, 496], [524, 747, 782, 526, 526, 526]]\n\ndef suml(l1, l2):\n return [(l1[i] + l2[i]) for i in range(len(l1))]\n\nresf = suml(suml(res[0], res[1]), res[2])\nresf = [i/3 for i in resf]\n\nimport matplotlib.pyplot as plt\n\nplt.plot(gammas, resf)\nplt.xlabel(\"Gamma Values\")\nplt.ylabel(\"Sample complexity averaged over 3 seeds\")\nplt.savefig(\"gamma_variation.png\")\nplt.show()" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
cmshobe/grass-scripts
[ "5d593f301fd708252e5a69b7304ce28ea8490506" ]
[ "delineate_watersheds.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 19 14:57:29 2018\n\n@author: charlie\n\ncreate watersheds from pour point shapefile 'cleaned_pour_points'\n\"\"\"\nimport numpy as np\nimport grass.script as grass\nfrom grass.pygrass.modules.shortcuts import general as g\nfrom grass.pygrass.modules.shortcuts import vector as v\nfrom grass.pygrass.modules.shortcuts import raster as r\n\ngrass.use_temp_region() #save the region so I can zoom in on rasters\n\n#first, export all pour points to ascii file to be read in for wshed creation\n\ncleaned_pour_points = 'cleaned_pour_points'\nout_file = 'cleaned_pour_points.csv'\n\nv.out_ascii(input=cleaned_pour_points, layer=1, type='point', \n output=out_file, format='point', separator=',')\n\n#now, loop through those pour points and create a watershed for each one\npoints = np.genfromtxt(out_file, delimiter=',') #import csv\nx_coords = points[:, 0]\ny_coords = points[:, 1]\nn_points = len(x_coords)\n\n#set input flow direction map\nflow_dir_map = 'small_crop_buffer_flowdir'\nunique_names_list = []#use this later for patching\nfor point in range(n_points):#n_points\n #dynamically set output name to avoid overwriting wsheds\n wshed_name = 'wshed_' + str(point) \n print('Extracting watershed ' + str(point + 1) + ' of ' + str(n_points))\n\n #get coordinates into right form\n x_coord = x_coords[point]\n y_coord = y_coords[point]\n coords = [x_coord, y_coord]\n \n #extract one watershed\n r.water_outlet(input=flow_dir_map, output=wshed_name, \n coordinates=coords)\n \n #temporarily zoomthe computational region to the raster of interest\n g.region(flags='p', raster=wshed_name, zoom=wshed_name)\n \n #use r.null as an intermediate step to creating vector watersheds\n r.null(map=wshed_name, setnull=0) \n \n #now use r.recode to give each raster a unique value\n #write a rules file\n rules_file = 'wshed_' + str(point) + '_recode_rules.txt'\n file = open(rules_file, \"w\")\n file.write('1:1:' + str(point) + ':' + str(point))\n file.close()\n \n #use r.recode to find the rules file and recode the basin\n recoded_name = 'wshed_' + str(point) + '_unique'\n r.recode(overwrite=True, input=wshed_name, output=recoded_name, \n rules=rules_file)\n \n #save unique name for patching outside of the loop\n unique_names_list.append(recoded_name)\n \n grass.del_temp_region() #revert to full region\n \n#now once they've all been extracted, nulled, and recoded, we patch the basins\npatched_raster = 'all_unique_wsheds_rast'\nr.patch(input=unique_names_list, output=patched_raster)\n\n#now turn them into a vector areas file\nuncleaned_vector = 'all_unique_wsheds_vect'\nr.to_vect(input=patched_raster, output=uncleaned_vector, type='area')\n\n#now clean the vector polygons file to get ride of weird tiny basins\ncleaned_vector = 'all_unique_wsheds_cleaned'\nv.clean(input=uncleaned_vector, output=cleaned_vector, tool='rmarea',\n threshold=1000000)\n\n#now add a column to the attribute table to hold drainage area in square km\ncolumn_name_and_type = 'drainage_area_sqkm DOUBLE PRECISION'\nv.db_addcolumn(map=cleaned_vector, columns=column_name_and_type)\n\n#finally, calculate polygon areas and use them to populate the new column\ncolumn_name = 'drainage_area_sqkm'\nv.to_db(map=cleaned_vector, option='area', columns=column_name, \n units='kilometers')\n\n#and we're done!" ]
[ [ "numpy.genfromtxt" ] ]
eilon123/CROSR-main
[ "c94b117008d41c39751d2c6cfc4e043e4d8167c6" ]
[ "libMR/setup.py" ]
[ "from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nfrom Cython.Build import cythonize\nimport sys\nimport numpy\n#ext_modules = [Extension(\"libmr\", [\"libmr.pyx\", \"MetaRecognition.cpp\"])]\n\nsetup(\n ext_modules = cythonize(Extension('libmr',\n [\"libmr.pyx\",\n \"MetaRecognition.cpp\",\n \"weibull.c\"\n ],\n include_dirs = [\".\", numpy.get_include()],\n language=\"c++\",\n )),\n data_files = [('.', ['MetaRecognition.h', 'weibull.h'])],\n\n)\n" ]
[ [ "numpy.get_include" ] ]
fgulan/machine-learning-fer
[ "d37142a1f00fd04f1454b43c1b4bb02b0dcf179b" ]
[ "LAB2/mlutils.py" ]
[ "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef plot_2d_clf_problem(X, y, h=None):\r\n '''\r\n Plots a two-dimensional labeled dataset (X,y) and, if function h(x) is given, \r\n the decision surfaces.\r\n '''\r\n assert X.shape[1] == 2, \"Dataset is not two-dimensional\"\r\n if h!=None : \r\n # Create a mesh to plot in\r\n r = 0.02 # mesh resolution\r\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\r\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, r),\r\n np.arange(y_min, y_max, r))\r\n XX=np.c_[xx.ravel(), yy.ravel()]\r\n try:\r\n Z_test = h(XX)\r\n if Z_test.shape == ():\r\n # h returns a scalar when applied to a matrix; map explicitly\r\n Z = np.array(map(h,XX))\r\n else :\r\n Z = Z_test\r\n except ValueError:\r\n # can't apply to a matrix; map explicitly\r\n Z = np.array(map(h,XX))\r\n # Put the result into a color plot\r\n Z = Z.reshape(xx.shape)\r\n plt.contourf(xx, yy, Z, cmap=plt.cm.Pastel1)\r\n\r\n # Plot the dataset\r\n plt.scatter(X[:,0],X[:,1], c=y, cmap=plt.cm.Paired, marker='o', s=50);\r\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.contourf", "matplotlib.pyplot.scatter" ] ]
kowaalczyk/image-cluster
[ "2d034dd154491da8b22ee6662c9dcf9500283186" ]
[ "image_cluster/pipeline/pipeline.py" ]
[ "from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin\nfrom sklearn.metrics import silhouette_score, calinski_harabaz_score\nimport numpy as np\n\nfrom image_cluster.pipeline.utils import VerboseMixin\nfrom image_cluster.pipeline.model_factory import ModelFactory\nfrom image_cluster.types import Score\n\n\nclass Pipeline(BaseEstimator, ClusterMixin, VerboseMixin):\n \"\"\"\n Wrapper for preprocessing pipeline, clustering model,\n metric calculation and postprocessing.\n \"\"\"\n def __init__(\n self,\n preprocessing: TransformerMixin,\n model_factory: ModelFactory,\n postprocessing: TransformerMixin,\n verbose: bool = False\n ):\n self.preprocessing = preprocessing\n self.model_factory = model_factory\n self.postprocessing = postprocessing\n self.verbose = verbose\n\n def fit(self, X):\n \"\"\"\n Builds a model and fits it on preprocessed data.\n After fitting, postprocessing is performed,\n however predict and fit_predict methods return model labels\n (without postprocessing) to keep consistency with\n scikt-learn estimators API.\n \"\"\"\n self._log(\"Preprocessing...\")\n self.preprocessed_ = self.preprocessing.fit_transform(X)\n self._log(\"Creating model...\")\n self.n_samples_ = len(self.preprocessed_)\n self.model_ = self.model_factory(self.n_samples_)\n self._log(\"Fitting model...\")\n self.model_.fit(self.preprocessed_)\n self.labels_ = self.model_.fit_predict(self.preprocessed_)\n self._log(\"Postprocessing...\")\n self.postprocessed_ = self.postprocessing.fit_transform(\n self.labels_\n )\n return self\n\n def score(self):\n self._log(\"Scoring...\")\n normal_labels = self.labels_[self.labels_ >= 0]\n label_value_counts = np.bincount(normal_labels)\n outliers = self.labels_[self.labels_ == -1]\n self.score_ = Score(\n silhouette_score=silhouette_score(\n self.preprocessed_,\n self.labels_\n ),\n calinski_harabaz_score=calinski_harabaz_score(\n self.preprocessed_,\n self.labels_\n ),\n n_samples=len(self.preprocessed_),\n n_clusters=self.model_.n_clusters,\n n_outliers=len(outliers),\n label_size_min=np.min(label_value_counts),\n label_size_max=np.max(label_value_counts),\n label_size_mean=np.mean(label_value_counts),\n label_size_var=np.var(label_value_counts)\n )\n self._log(self.score_)\n return self.score_\n" ]
[ [ "numpy.min", "sklearn.metrics.silhouette_score", "sklearn.metrics.calinski_harabaz_score", "numpy.max", "numpy.mean", "numpy.bincount", "numpy.var" ] ]
oguzserbetci/EffiCare
[ "4a81b6ddcff6b3b7d37dd8a6a8cd0419d742709e" ]
[ "models/encoders.py" ]
[ "import torch\nimport torch.nn\n\nfrom models.modules import *\nfrom utils import load_class\n\n\nclass EventEncoder(nn.Module):\n def __init__(self, vocab, freeze_emb, emb_dim, include_time, n_bins):\n super().__init__()\n input_size = emb_dim\n input_size += n_bins\n if include_time:\n input_size += 2\n\n self.vocab = vocab\n if vocab.vectors is not None:\n self.encoder = nn.Embedding.from_pretrained(vocab.vectors, freeze=freeze_emb, padding_idx=0)\n else:\n self.encoder = nn.Embedding(len(vocab), emb_dim, padding_idx=0)\n if freeze_emb:\n self.encoder.weight.requires_grad = False\n\n bins = torch.eye(n_bins)\n bins = torch.cat([torch.zeros(1, n_bins), bins], 0)\n self.bins = nn.Parameter(bins, requires_grad=False)\n\n self.include_time = include_time\n\n def forward(self, input):\n if input.shape[-1] == 100:\n emb = self.encoder(input)\n else:\n emb = self.encoder(input[:, :, 0])\n arr = [emb]\n if self.include_time:\n time_input = torch.arange(len(input)).float()[:, None, None].expand(input.shape[:2] + (1,)).to(next(self.parameters()).device)\n arr.extend([torch.log(time_input + 1), torch.exp(time_input/1000) - 1])\n sep_bins = self.bins[input[:, :, 1].long()]\n arr.append(sep_bins)\n emb = torch.cat(arr, -1) # N, L, C\n return emb\n\n\nclass TimestepEncoder(nn.Module):\n def __init__(self, joint_vocab, tables, freeze_emb=True, emb_dim=100, include_time=True, n_bins=9, hidden_size=50,\n timestep_modelcls='models.LinearMaxMeanSumPool', freeze_encoders=False,\n **kwargs):\n super().__init__()\n\n input_size = emb_dim\n if include_time:\n input_size += 2\n input_size += n_bins\n\n self.event_encoder = EventEncoder(joint_vocab, freeze_emb, emb_dim, include_time, n_bins)\n\n models = []\n\n self.out_size = 0\n for _ in tables:\n model = load_class(timestep_modelcls)(input_size, hidden_size=hidden_size, **kwargs)\n models.append(model)\n self.out_size += model.out_size\n self.models = nn.ModuleList(models)\n\n if freeze_encoders:\n for param in self.parameters():\n param.requires_grad = False\n\n self.tables = tables\n\n def forward(self, *inputs: Tensor):\n '''\n Input:\n inputs: Tensors for table with shape: N, L, C\n Output:\n Tensor of timestep embeddings with shape: N, L, C\n '''\n out = []\n measure_inds = {}\n for i, (table, input, model) in enumerate(zip(self.tables, inputs, self.models)):\n batch_size = input.size(0)\n input = input.reshape((-1, ) + input.shape[2:]) # NxL1, L2, C\n input = self.event_encoder(input)\n input, measure_ind = model(input) # NxL, C\n measure_inds[table.table] = measure_ind\n\n input = input.reshape((batch_size, -1) + input.shape[1:]) # N, L, C\n out.append(input)\n\n return out, {'measure_inds': measure_inds}\n\n\nclass DeepTimestepEncoder(nn.Module):\n def __init__(self, joint_vocab, tables, freeze_emb=True, emb_dim=100, include_time=True, n_bins=9, hidden_size=50,\n timestep_modelcls='models.LinearMaxMeanSumPool', freeze_encoders=False,\n pat_hidden_size=128,\n **kwargs):\n super().__init__()\n\n input_size = emb_dim\n input_size += n_bins\n if include_time:\n input_size += 2\n\n self.event_encoder = EventEncoder(joint_vocab, freeze_emb, emb_dim, include_time, n_bins)\n self.model = load_class(timestep_modelcls)(input_size, hidden_size=hidden_size, **kwargs)\n self.mixer = nn.Sequential(nn.Linear(len(tables) * self.model.out_size, self.model.out_size),\n nn.ReLU(),\n nn.Linear(self.model.out_size, pat_hidden_size),\n nn.ReLU())\n\n for param in self.model.parameters():\n param.requires_grad = False\n\n if freeze_encoders:\n for param in 
self.parameters():\n param.requires_grad = False\n\n self.out_size = pat_hidden_size\n self.tables = tables\n\n def forward(self, *inputs: Tensor):\n '''\n Input:\n input: Tensors for table with shape: N, L, C\n Output:\n Tensor of timestep embeddings with shape: N, L, C\n '''\n out = []\n measure_inds = {}\n for table, input in zip(self.tables, inputs):\n batch_size = input.size(0)\n input = input.reshape((-1, ) + input.shape[2:]) # NxL1, L2, C\n input = self.event_encoder(input)\n input, measure_ind = self.model(input) # NxL, C\n measure_inds[table.table] = measure_ind\n\n input = input.reshape((batch_size, -1) + input.shape[1:]) # N, L, C\n out.append(input)\n\n timesteps = torch.cat(out, -1)\n timesteps = self.mixer(timesteps)\n return timesteps, {'measure_inds': measure_inds}\n\n\nclass PatientPoolEncoder(nn.Module):\n def __init__(self, input_size, tables, pat_padaware=False, timestep_modelcls='models.LinearMaxMeanSumPool', include_demfc=True, include_dem=True, dem_size=8, dem_dropout=.0, visit_dropout=.0, freeze_encoders=False, **otherkw):\n super().__init__()\n\n if include_dem and include_demfc:\n self.dem_fc = nn.Sequential(nn.Linear(dem_size, 40),\n nn.ReLU(),\n nn.Dropout(dem_dropout),\n nn.Linear(40, 20),\n nn.ReLU()\n )\n dem_size = 20\n elif include_dem:\n pass\n else:\n dem_size = 0\n\n\n if freeze_encoders:\n for param in self.parameters():\n param.requires_grad = False\n\n self.include_dem = include_dem\n self.include_demfc = include_demfc\n self.out_size = 3 * input_size + dem_size + 2\n self.padaware = pat_padaware\n self.tables = tables\n\n def forward(self, input, dem):\n '''\n Input:\n input: Tensor of timestep embeddings with shape: N, L, C\n dem: Tensor of demographics with shape: N, C\n Output:\n Tensor of patient embeddings for each timestep with shape: N, L, C\n '''\n patient = []\n time_inds = {}\n activations = {}\n for inp, table in zip(input, self.tables):\n N = inp.shape[0]\n L = inp.shape[1]\n\n inp = F.pad(inp.contiguous(), (0, 0, L-1, 0, 0, 0)) # N, L', C\n inp = inp.transpose(1, 2).contiguous() # N, C, L\n\n p_max, p_max_ind = F.max_pool1d(inp, L, 1, return_indices=True) # N, C, L\n p_max_ind = (p_max_ind - (L - 1)).detach()\n\n # Collect max activations and indices\n time_inds[table.table] = p_max_ind\n activations[table.table] = p_max.detach()\n\n p_max = p_max.transpose(1, 2).contiguous() # N, L, C\n if self.padaware:\n p_avg = nonzero_avg_pool1d(inp, L, 1).transpose(1, 2).contiguous() # N, L, C\n else:\n p_avg = F.avg_pool1d(inp, L, 1).transpose(1, 2).contiguous() # N, L, C\n p_sum = norm_sum_pool1d(inp, L, 1).transpose(1, 2).contiguous() # N, L, C\n\n patient_timesteps = torch.cat([p_max, p_avg, p_sum], -1) # N, L, C'\n patient.append(patient_timesteps)\n\n patient_embedding = patient\n if self.include_dem:\n if self.include_demfc:\n dem = self.dem_fc(dem)\n dem = dem.unsqueeze(1).expand(N, L, -1) # N, L, C\n patient_embedding.append(dem)\n\n timestep = torch.arange(L, dtype=torch.float).to(dem.device).unsqueeze(1).unsqueeze(0).expand(N, -1, -1) # N, L, C\n time_input = [torch.log(timestep + 1), torch.exp(timestep/1000) - 1]\n\n patient_embedding += time_input\n\n patient_timesteps = torch.cat(patient_embedding, 2) # N, L, C'\n\n return patient_timesteps, {'time_inds': time_inds,\n 'activations': activations}\n\n\nclass PatientRNNEncoder(nn.Module):\n def __init__(self, input_size, include_dem=True, include_demfc=True, dem_size=10, dem_dropout=.0, pat_hidden_size=128, visit_dropout=.0, pat_layers=1, freeze_encoders=False, **otherkw):\n 
super().__init__()\n if include_dem:\n if include_demfc:\n self.dem_fc = nn.Sequential(nn.Linear(dem_size, 40),\n nn.ReLU(),\n nn.Dropout(dem_dropout),\n nn.Linear(40, 20),\n nn.ReLU()\n )\n dem_size = 20\n input_size += dem_size\n self.rnn = nn.GRU(input_size, pat_hidden_size, pat_layers, dropout=visit_dropout, batch_first=True)\n\n if freeze_encoders:\n for param in self.parameters():\n param.requires_grad = False\n\n self.include_dem = include_dem\n self.include_demfc = include_demfc\n self.out_size = pat_hidden_size\n\n def forward(self, timesteps, dem=None):\n '''\n Input:\n input: Tensor of timestep embeddings with shape: N, L, C\n dem: Tensor of demographics with shape: N, C\n Output:\n Tensor of patient embeddings for each timestep with shape: N, L, C\n '''\n\n if self.include_dem:\n N = timesteps.shape[0]\n L = timesteps.shape[1]\n if self.include_demfc:\n dem = self.dem_fc(dem)\n dem = dem.unsqueeze(1).expand(N, L, -1) # N, L, C\n\n timesteps = torch.cat([timesteps, dem], -1) # N, L, C'\n\n timesteps, _ = self.rnn(timesteps)\n\n # L = timestep_dem.shape[1]\n\n # input = F.pad(timestep_dem.contiguous(), (0, 0, L-1, 0)) # N, L', C\n # input = input.transpose(1, 2).contiguous() # N, C, L\n # p_avg = nonzero_avg_pool1d(input, L, 1)\n # p_avg = p_avg.transpose(1, 2).contiguous() # N, L, C\n\n # out = torch.cat([timestep_dem, p_avg], dim=2)\n\n # return F.relu(p_avg), {}\n return timesteps, {}\n\n\nclass PatientMeanEncoder(nn.Module):\n def __init__(self, input_size, include_demfc=True, dem_size=10, dem_dropout=.0, pat_hidden_size=128, visit_dropout=.0, pat_layers=1, freeze_encoders=False, **otherkw):\n super().__init__()\n if include_demfc:\n self.dem_fc = nn.Sequential(nn.Linear(dem_size, 40),\n nn.ReLU(),\n nn.Dropout(dem_dropout),\n nn.Linear(40, 20),\n nn.ReLU()\n )\n dem_size = 20\n\n input_size += dem_size\n\n if freeze_encoders:\n for param in self.parameters():\n param.requires_grad = False\n\n self.include_demfc = include_demfc\n self.out_size = input_size\n\n def forward(self, timesteps, dem):\n '''\n Input:\n input: Tensor of timestep embeddings with shape: N, L, C\n dem: Tensor of demographics with shape: N, C\n Output:\n Tensor of patient embeddings for each timestep with shape: N, L, C\n '''\n N = timesteps.shape[0]\n L = timesteps.shape[1]\n if self.include_demfc:\n dem = self.dem_fc(dem)\n dem = dem.unsqueeze(1).expand(N, L, -1) # N, L, C\n\n timestep_dem = torch.cat([timesteps, dem], -1) # N, L, C'\n\n L = timestep_dem.shape[1]\n\n input = F.pad(timestep_dem.contiguous(), (0, 0, L-1, 0)) # N, L', C\n input = input.transpose(1, 2).contiguous() # N, C, L\n p_avg = nonzero_avg_pool1d(input, L, 1)\n p_avg = p_avg.transpose(1, 2).contiguous() # N, L, C\n\n # out = torch.cat([timestep_dem, p_avg], dim=2)\n\n return F.relu(p_avg), {}\n\n\nclass PatientCNNEncoder(nn.Module):\n def __init__(self, input_size, include_demfc=True, dem_size=10, dem_dropout=.0, pat_hidden_size=128, visit_dropout=.0, pat_layers=1, freeze_encoders=False, **otherkw):\n super().__init__()\n if include_demfc:\n self.dem_fc = nn.Sequential(nn.Linear(dem_size, 40),\n nn.ReLU(),\n nn.Dropout(dem_dropout),\n nn.Linear(40, 20),\n nn.ReLU()\n )\n dem_size = 20\n\n input_size += dem_size\n self.cnn = DeepCNN(input_size, pat_hidden_size, visit_dropout)\n\n if freeze_encoders:\n for param in self.parameters():\n param.requires_grad = False\n\n self.include_demfc = include_demfc\n self.out_size = pat_hidden_size * 2\n\n def forward(self, timesteps, dem):\n '''\n Input:\n input: Tensor of timestep embeddings with 
shape: N, L, C\n dem: Tensor of demographics with shape: N, C\n Output:\n Tensor of patient embeddings for each timestep with shape: N, L, C\n '''\n N = timesteps.shape[0]\n L = timesteps.shape[1]\n if self.include_demfc:\n dem = self.dem_fc(dem)\n dem = dem.unsqueeze(1).expand(N, L, -1) # N, L, C\n\n timestep_dem = torch.cat([timesteps, dem], -1) # N, L, C'\n\n timestep_dem = self.cnn(timestep_dem.transpose(1,2).contiguous()).transpose(1,2).contiguous()\n\n # L = timestep_dem.shape[1]\n\n # input = F.pad(timestep_dem.contiguous(), (0, 0, L-1, 0, 0, 0)) # N, L', C\n # input = input.transpose(1, 2).contiguous() # N, C, L\n # p_avg = nonzero_avg_pool1d(input, L, 1)\n # p_avg = p_avg.transpose(1, 2).contiguous() # N, L, C\n\n # out = torch.cat([timestep_dem, p_avg], dim=2)\n\n return F.relu(timestep_dem), {}\n\n\nclass PatientRETAINEncoder(nn.Module):\n def __init__(self, input_size, include_dem=True, include_demfc=True, dem_size=10, dem_dropout=.0, pat_hidden_size=128, visit_dropout=.0, pat_layers=1, freeze_encoders=False, **otherkw):\n super().__init__()\n if include_dem:\n if include_demfc:\n self.dem_fc = nn.Sequential(nn.Linear(dem_size, 40),\n nn.ReLU(),\n nn.Dropout(dem_dropout),\n nn.Linear(40, 20),\n nn.ReLU()\n )\n dem_size = 20\n input_size += dem_size\n\n # Visit-level attention\n self.a_rnn = nn.GRU(input_size, pat_hidden_size, pat_layers, dropout=visit_dropout, batch_first=True)\n self.a_fc = nn.Linear(pat_hidden_size, 1)\n\n # Variable-level attention\n self.b_rnn = nn.GRU(input_size, pat_hidden_size, pat_layers, dropout=visit_dropout, batch_first=True)\n self.b_fc = nn.Linear(pat_hidden_size, input_size)\n\n if freeze_encoders:\n for param in self.parameters():\n param.requires_grad = False\n\n self.include_demfc = include_demfc\n self.include_dem = include_dem\n self.out_size = input_size\n\n def forward(self, timesteps, dem):\n '''\n Input:\n input: Tensor of timestep embeddings with shape: N, L, C\n dem: Tensor of demographics with shape: N, C\n Output:\n Tensor of patient embeddings for each timestep with shape: N, L, C\n '''\n if self.include_dem:\n N = timesteps.shape[0]\n L = timesteps.shape[1]\n if self.include_demfc:\n dem = self.dem_fc(dem)\n dem = dem.unsqueeze(1).expand(N, L, -1) # N, L, C\n\n timesteps = torch.cat([timesteps, dem], -1) # N, L, C'\n\n alphas, _ = self.a_rnn(timesteps)\n alphas = self.a_fc(alphas)\n alphas = F.tanh(alphas)\n\n betas, _ = self.b_rnn(timesteps)\n betas = self.b_fc(betas)\n betas = F.tanh(betas)\n\n timesteps = timesteps * alphas * betas\n\n L = timesteps.shape[1]\n input = F.pad(timesteps.contiguous(), (0, 0, L-1, 0)) # N, L', C\n input = input.transpose(1, 2).contiguous() # N, C, L\n out = norm_sum_pool1d(input, L, 1)\n out = out.transpose(1, 2).contiguous() # N, L, C\n\n return out, {}\n" ]
[ [ "torch.zeros", "torch.cat", "torch.eye", "torch.exp", "torch.log", "torch.arange" ] ]
gmg-utn/elementos_finitos
[ "5f8b11886d94d926fb358ad6344b079c5cb63e4e" ]
[ "ejemplos/ejemplo3.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nEjemplo 3: aproximación de una función\n\nJupyter notebook\n \nautor: Ramiro Irastorza\n\"\"\"\n\n\nfrom __future__ import print_function\nimport numpy as np #importo numpy y lo denomino np\nimport matplotlib.pyplot as plt\n\n#Definimos la función sombrero\ndef phi(x,xi,i):\n hi = xi[1]-xi[0]\n f = np.zeros_like(x)\n for m in range(len(x)):\n if i == 0:\n if xi[i] <= x[m] < xi[i+1]:\n f[m] = (xi[i+1]-x[m])/(xi[i+1]-xi[i])\n else:\n f[m] = 0.0 \n elif i == len(xi):\n if (xi[i-1] < x[m] <= xi[i]):\n f[m] = (x[m]-xi[i-1])/(xi[i]-xi[i-1])\n else:\n f[m] = 0.0\n else:\n if (xi[i-1] < x[m] <= xi[i]):\n f[m] = (x[m]-xi[i-1])/(xi[i]-xi[i-1])\n elif xi[i] < x[m] <= xi[i+1]:\n f[m] = (xi[i+1]-x[m])/(xi[i+1]-xi[i])\n else:\n f[m] = 0.0\n \n return f\n\n\n#Puntos de x0 a xnx\nnx = 5 #numero de intervalos\nnodos = nx+1 #cantidad de nodos\nx = np.linspace(0,1,200) #continuo\ny = 2.0*x*np.sin(2.0*np.pi*x)+3.0 #funcion\nxi = np.linspace(0,1,nodos) #nodos equiespaciados\nvi = 2.0*xi*np.sin(2.0*np.pi*xi)+3.0 #valores en los nodos\n\nv = vi[0]*phi(x,xi,0)+vi[1]*phi(x,xi,1)+vi[2]*phi(x,xi,2)+vi[3]*phi(x,xi,3)+vi[4]*phi(x,xi,4)+vi[5]*phi(x,xi,5)\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nfig, axs = plt.subplots(1,1)\naxs.plot(xi,vi,'ro',markersize=10)\naxs.plot(x,v,'r')\naxs.plot(x,y,'b')\naxs.fill_between(x, y, v)\naxs.set_ylim(-1,5)\naxs.axhline(0, color='gray')\naxs.vlines(xi[0],vi[0],0,linestyles='dashed')\naxs.annotate(r'$x_{0}$', xy=(xi[0]-0.02, -0.5),fontsize=16)\naxs.vlines(xi[-1],vi[-1],0,linestyles='dashed')\naxs.annotate(r'$x_{n}$', xy=(xi[-1]-0.02, -0.5),fontsize=16)\n\nplt.show()\n\n\n\n" ]
[ [ "numpy.linspace", "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "numpy.sin", "numpy.zeros_like", "matplotlib.pyplot.show" ] ]
haoyudong-97/tg2019task
[ "4f31968cc49105d13bc95487136dc3ae986bf4d1" ]
[ "knn.py" ]
[ "#!/usr/bin/env python3\n\nimport os\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.metrics.pairwise import cosine_distances\n\n\ndef read_explanations(path):\n header = []\n uid = None\n\n df = pd.read_csv(path, sep='\\t', dtype=str)\n\n for name in df.columns:\n if name.startswith('[SKIP]'):\n if 'UID' in name and not uid:\n uid = name\n else:\n header.append(name)\n\n if not uid or len(df) == 0:\n warnings.warn('Possibly misformatted file: ' + path)\n return []\n\n return df.apply(lambda r: (r[uid], ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))), 1).tolist()\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--nearest', type=int, default=10)\n parser.add_argument('tables')\n parser.add_argument('questions', type=argparse.FileType('r', encoding='UTF-8'))\n args = parser.parse_args()\n\n explanations = []\n\n for path, _, files in os.walk(args.tables):\n for file in files:\n explanations += read_explanations(os.path.join(path, file))\n\n if not explanations:\n warnings.warn('Empty explanations')\n\n df_q = pd.read_csv(args.questions, sep='\\t', dtype=str)\n df_e = pd.DataFrame(explanations, columns=('uid', 'text'))\n\n vectorizer = TfidfVectorizer().fit(df_q['Question']).fit(df_e['text'])\n #vectorizer = TfidfVectorizer().fit(df_q['Question']).fit(df_e['text'])\n #vectorizer = CountVectorizer().fit(df_q['Question']).fit(df_e['text'])\n X_q = vectorizer.transform(df_q['Question'])\n X_e = vectorizer.transform(df_e['text'])\n X_dist = cosine_distances(X_q, X_e)\n\n for i_question, distances in enumerate(X_dist):\n for i_explanation in np.argsort(distances)[:args.nearest]:\n print('{}\\t{}'.format(df_q.loc[i_question]['questionID'], df_e.loc[i_explanation]['uid']))\n\n\nif '__main__' == __name__:\n main()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "sklearn.metrics.pairwise.cosine_distances", "numpy.argsort", "pandas.isna", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
sinaure/server
[ "e214447ae534a472bf8aaeff0e8024a3dc936ad2" ]
[ "qa/L0_sequence_stress/sequence_stress.py" ]
[ "# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nsys.path.append(\"../common\")\n\nimport argparse\nfrom builtins import range\nfrom builtins import str\nfrom future.utils import iteritems\nimport os\nimport time\nimport threading\nimport traceback\nimport numpy as np\nimport test_util as tu\nfrom functools import partial\nimport tritongrpcclient as grpcclient\nfrom tritonclientutils import np_to_triton_dtype\n\nif sys.version_info >= (3, 0):\n import queue\nelse:\n import Queue as queue\n\nFLAGS = None\nCORRELATION_ID_BLOCK_SIZE = 100\nDEFAULT_TIMEOUT_MS = 5000\nSEQUENCE_LENGTH_MEAN = 16\nSEQUENCE_LENGTH_STDEV = 8\n\n_thread_exceptions = []\n_thread_exceptions_mutex = threading.Lock()\n\n\nclass UserData:\n\n def __init__(self):\n self._completed_requests = queue.Queue()\n\n\n# Callback function used for async_stream_infer()\ndef completion_callback(user_data, result, error):\n # passing error raise and handling out\n user_data._completed_requests.put((result, error))\n\n\nclass TimeoutException(Exception):\n pass\n\n\ndef check_sequence_async(client_metadata,\n trial,\n model_name,\n input_dtype,\n steps,\n timeout_ms=DEFAULT_TIMEOUT_MS,\n sequence_name=\"<unknown>\"):\n \"\"\"Perform sequence of inferences using async run. 
The 'steps' holds\n a list of tuples, one for each inference with format:\n\n (flag_str, value, expected_result, delay_ms)\n\n \"\"\"\n if ((\"savedmodel\" in trial) or (\"graphdef\" in trial) or\n (\"custom\" in trial) or (\"plan\" in trial)):\n tensor_shape = (\n 1,\n 1,\n )\n else:\n assert False, \"unknown trial type: \" + trial\n\n triton_client = client_metadata[0]\n sequence_id = client_metadata[1]\n\n # Execute the sequence of inference...\n seq_start_ms = int(round(time.time() * 1000))\n user_data = UserData()\n # Ensure there is no running stream\n triton_client.stop_stream()\n triton_client.start_stream(partial(completion_callback, user_data))\n\n sent_count = 0\n for flag_str, value, expected_result, delay_ms in steps:\n seq_start = False\n seq_end = False\n if flag_str is not None:\n seq_start = (\"start\" in flag_str)\n seq_end = (\"end\" in flag_str)\n\n if input_dtype == np.object:\n in0 = np.full(tensor_shape, value, dtype=np.int32)\n in0n = np.array([str(x) for x in in0.reshape(in0.size)],\n dtype=object)\n in0 = in0n.reshape(tensor_shape)\n else:\n in0 = np.full(tensor_shape, value, dtype=input_dtype)\n inputs = [\n grpcclient.InferInput(\"INPUT\", tensor_shape,\n np_to_triton_dtype(input_dtype)),\n ]\n inputs[0].set_data_from_numpy(in0)\n\n triton_client.async_stream_infer(model_name,\n inputs,\n sequence_id=sequence_id,\n sequence_start=seq_start,\n sequence_end=seq_end)\n sent_count += 1\n\n if delay_ms is not None:\n time.sleep(delay_ms / 1000.0)\n\n # Process the results in order that they were sent\n result = None\n processed_count = 0\n while processed_count < sent_count:\n (results, error) = user_data._completed_requests.get()\n if error is not None:\n raise error\n\n (_, value, expected, _) = steps[processed_count]\n processed_count += 1\n if timeout_ms != None:\n now_ms = int(round(time.time() * 1000))\n if (now_ms - seq_start_ms) > timeout_ms:\n raise TimeoutException(\n \"Timeout expired for {}\".format(sequence_name))\n\n result = results.as_numpy(\"OUTPUT\")[0][0]\n if FLAGS.verbose:\n print(\"{} {}: + {} = {}\".format(sequence_name, sequence_id, value,\n result))\n\n if expected is not None:\n if input_dtype == np.object:\n assert int(\n result\n ) == expected, \"{}: expected result {}, got {}\".format(\n sequence_name, expected, int(result))\n else:\n assert result == expected, \"{}: expected result {}, got {}\".format(\n sequence_name, expected, result)\n triton_client.stop_stream()\n\n\ndef get_datatype(trial):\n # Get the datatype to use based on what models are available (see test.sh)\n if (\"plan\" in trial) or (\"savedmodel\" in trial):\n return np.float32\n if \"graphdef\" in trial:\n return np.dtype(object)\n return np.int32\n\n\ndef sequence_valid(client_metadata, rng, trial, model_name, dtype, len_mean,\n len_stddev, sequence_name):\n # Create a variable length sequence with \"start\" and \"end\" flags.\n seqlen = max(1, int(rng.normal(len_mean, len_stddev)))\n print(\"{} {}: valid seqlen = {}\".format(sequence_name, client_metadata[1],\n seqlen))\n\n values = rng.randint(0, 1024 * 1024, size=seqlen, dtype=dtype)\n\n steps = []\n expected_result = 0\n\n for idx, step in enumerate(range(seqlen)):\n flags = \"\"\n if idx == 0:\n flags += \",start\"\n if idx == (seqlen - 1):\n flags += \",end\"\n\n val = values[idx]\n delay_ms = None\n expected_result += val\n\n # (flag_str, value, expected_result, delay_ms)\n steps.append((flags, val, expected_result, delay_ms),)\n\n check_sequence_async(client_metadata,\n trial,\n model_name,\n dtype,\n steps,\n 
sequence_name=sequence_name)\n\n\ndef sequence_valid_valid(client_metadata, rng, trial, model_name, dtype,\n len_mean, len_stddev, sequence_name):\n # Create two variable length sequences with \"start\" and \"end\"\n # flags, where both sequences use the same correlation ID and are\n # sent back-to-back.\n seqlen = [\n max(1, int(rng.normal(len_mean, len_stddev))),\n max(1, int(rng.normal(len_mean, len_stddev)))\n ]\n print(\"{} {}: valid-valid seqlen[0] = {}, seqlen[1] = {}\".format(\n sequence_name, client_metadata[1], seqlen[0], seqlen[1]))\n\n values = [\n rng.randint(0, 1024 * 1024, size=seqlen[0], dtype=dtype),\n rng.randint(0, 1024 * 1024, size=seqlen[1], dtype=dtype)\n ]\n\n for p in [0, 1]:\n steps = []\n expected_result = 0\n\n for idx, step in enumerate(range(seqlen[p])):\n flags = \"\"\n if idx == 0:\n flags += \",start\"\n if idx == (seqlen[p] - 1):\n flags += \",end\"\n\n val = values[p][idx]\n delay_ms = None\n expected_result += val\n\n # (flag_str, value, expected_result, delay_ms)\n steps.append((flags, val, expected_result, delay_ms),)\n\n check_sequence_async(client_metadata,\n trial,\n model_name,\n dtype,\n steps,\n sequence_name=sequence_name)\n\n\ndef sequence_valid_no_end(client_metadata, rng, trial, model_name, dtype,\n len_mean, len_stddev, sequence_name):\n # Create two variable length sequences, the first with \"start\" and\n # \"end\" flags and the second with no \"end\" flag, where both\n # sequences use the same correlation ID and are sent back-to-back.\n seqlen = [\n max(1, int(rng.normal(len_mean, len_stddev))),\n max(1, int(rng.normal(len_mean, len_stddev)))\n ]\n print(\"{} {}: valid-no-end seqlen[0] = {}, seqlen[1] = {}\".format(\n sequence_name, client_metadata[1], seqlen[0], seqlen[1]))\n\n values = [\n rng.randint(0, 1024 * 1024, size=seqlen[0], dtype=dtype),\n rng.randint(0, 1024 * 1024, size=seqlen[1], dtype=dtype)\n ]\n\n for p in [0, 1]:\n steps = []\n expected_result = 0\n\n for idx, step in enumerate(range(seqlen[p])):\n flags = \"\"\n if idx == 0:\n flags += \",start\"\n if (p == 0) and (idx == (seqlen[p] - 1)):\n flags += \",end\"\n\n val = values[p][idx]\n delay_ms = None\n expected_result += val\n\n # (flag_str, value, expected_result, delay_ms)\n steps.append((flags, val, expected_result, delay_ms),)\n\n check_sequence_async(client_metadata,\n trial,\n model_name,\n dtype,\n steps,\n sequence_name=sequence_name)\n\n\ndef sequence_no_start(client_metadata, rng, trial, model_name, dtype,\n sequence_name):\n # Create a sequence without a \"start\" flag. Sequence should get an\n # error from the server.\n seqlen = 1\n print(\"{} {}: no-start seqlen = {}\".format(sequence_name,\n client_metadata[1], seqlen))\n\n values = rng.randint(0, 1024 * 1024, size=seqlen, dtype=dtype)\n\n steps = []\n\n for idx, step in enumerate(range(seqlen)):\n flags = None\n val = values[idx]\n delay_ms = None\n\n # (flag_str, value, expected_result, delay_ms)\n steps.append((flags, val, None, delay_ms),)\n\n try:\n check_sequence_async(client_metadata,\n trial,\n model_name,\n dtype,\n steps,\n sequence_name=sequence_name)\n assert False, \"expected inference failure from missing START flag\"\n except Exception as ex:\n if \"must specify the START flag\" not in ex.message():\n raise\n\n\ndef sequence_no_end(client_metadata, rng, trial, model_name, dtype, len_mean,\n len_stddev, sequence_name):\n # Create a variable length sequence with \"start\" flag but that\n # never ends. 
The sequence should be aborted by the server and its\n # slot reused for another sequence.\n seqlen = max(1, int(rng.normal(len_mean, len_stddev)))\n print(\"{} {}: no-end seqlen = {}\".format(sequence_name, client_metadata[1],\n seqlen))\n\n values = rng.randint(0, 1024 * 1024, size=seqlen, dtype=dtype)\n\n steps = []\n expected_result = 0\n\n for idx, step in enumerate(range(seqlen)):\n flags = \"\"\n if idx == 0:\n flags = \"start\"\n\n val = values[idx]\n delay_ms = None\n expected_result += val\n\n # (flag_str, value, expected_result, delay_ms)\n steps.append((flags, val, expected_result, delay_ms),)\n\n check_sequence_async(client_metadata,\n trial,\n model_name,\n dtype,\n steps,\n sequence_name=sequence_name)\n\n\ndef stress_thread(name, seed, pass_cnt, correlation_id_base, trial, model_name,\n dtype):\n # Thread responsible for generating sequences of inference\n # requests.\n global _thread_exceptions\n\n print(\"Starting thread {} with seed {}\".format(name, seed))\n rng = np.random.RandomState(seed)\n\n client_metadata_list = []\n\n try:\n # Must use streaming GRPC context to ensure each sequences'\n # requests are received in order. Create 2 common-use contexts\n # with different correlation IDs that are used for most\n # inference requests. Also create some rare-use contexts that\n # are used to make requests with rarely-used correlation IDs.\n #\n # Need to remember the last choice for each context since we\n # don't want some choices to follow others since that gives\n # results not expected. See below for details.\n common_cnt = 2\n rare_cnt = 8\n last_choices = []\n\n for c in range(common_cnt + rare_cnt):\n client_metadata_list.append(\n (grpcclient.InferenceServerClient(\"localhost:8001\",\n verbose=FLAGS.verbose),\n correlation_id_base + c))\n last_choices.append(None)\n\n rare_idx = 0\n for p in range(pass_cnt):\n # Common or rare context?\n if rng.rand() < 0.1:\n # Rare context...\n choice = rng.rand()\n client_idx = common_cnt + rare_idx\n\n # Send a no-end, valid-no-end or valid-valid\n # sequence... 
because it is a rare context this should\n # exercise the idle sequence path of the sequence\n # scheduler\n if choice < 0.33:\n sequence_no_end(client_metadata_list[client_idx],\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"no-end\"\n elif choice < 0.66:\n sequence_valid_no_end(client_metadata_list[client_idx],\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"valid-no-end\"\n else:\n sequence_valid_valid(client_metadata_list[client_idx],\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"valid-valid\"\n\n rare_idx = (rare_idx + 1) % rare_cnt\n else:\n # Common context...\n client_idx = 0 if rng.rand() < 0.5 else 1\n client_metadata = client_metadata_list[client_idx]\n last_choice = last_choices[client_idx]\n\n choice = rng.rand()\n\n # no-start cannot follow no-end since the server will\n # just assume that the no-start is a continuation of\n # the no-end sequence instead of being a sequence\n # missing start flag.\n if ((last_choice != \"no-end\") and\n (last_choice != \"valid-no-end\") and (choice < 0.01)):\n sequence_no_start(client_metadata,\n rng,\n trial,\n model_name,\n dtype,\n sequence_name=name)\n last_choices[client_idx] = \"no-start\"\n elif choice < 0.05:\n sequence_no_end(client_metadata,\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"no-end\"\n elif choice < 0.10:\n sequence_valid_no_end(client_metadata,\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"valid-no-end\"\n elif choice < 0.15:\n sequence_valid_valid(client_metadata,\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"valid-valid\"\n else:\n sequence_valid(client_metadata,\n rng,\n trial,\n model_name,\n dtype,\n SEQUENCE_LENGTH_MEAN,\n SEQUENCE_LENGTH_STDEV,\n sequence_name=name)\n last_choices[client_idx] = \"valid\"\n\n except Exception as ex:\n _thread_exceptions_mutex.acquire()\n try:\n _thread_exceptions.append(traceback.format_exc())\n finally:\n _thread_exceptions_mutex.release()\n\n # We need to explicitly close each client so that streams get\n # cleaned up and closed correctly, otherwise the application\n # can hang when exiting.\n for c, i in client_metadata_list:\n print(\"thread {} closing client {}\".format(name, i))\n c.close()\n\n print(\"Exiting thread {}\".format(name))\n\n\ndef check_status(model_name):\n client = grpcclient.InferenceServerClient(\"localhost:8001\",\n verbose=FLAGS.verbose)\n stats = client.get_inference_statistics(model_name)\n print(stats)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-v',\n '--verbose',\n action=\"store_true\",\n required=False,\n default=False,\n help='Enable verbose output')\n parser.add_argument('-r',\n '--random-seed',\n type=int,\n required=False,\n help='Random seed.')\n parser.add_argument('-t',\n '--concurrency',\n type=int,\n required=False,\n default=8,\n help='Request concurrency. Default is 8.')\n parser.add_argument(\n '-i',\n '--iterations',\n type=int,\n required=False,\n default=200,\n help='Number of iterations of stress test to run. 
Default is 200.')\n FLAGS = parser.parse_args()\n\n # Initialize the random seed. For reproducibility each thread\n # maintains its own RNG which is initialized based on this seed.\n randseed = 0\n if FLAGS.random_seed != None:\n randseed = FLAGS.random_seed\n else:\n randseed = int(time.time())\n np.random.seed(randseed)\n\n print(\"random seed = {}\".format(randseed))\n print(\"concurrency = {}\".format(FLAGS.concurrency))\n print(\"iterations = {}\".format(FLAGS.iterations))\n\n trial = \"custom\"\n dtype = get_datatype(trial)\n model_name = tu.get_sequence_model_name(trial, dtype)\n\n threads = []\n for idx, thd in enumerate(range(FLAGS.concurrency)):\n thread_name = \"thread_{}\".format(idx)\n\n # Create the seed for the thread. Since these are created in\n # reproducible order off of the initial seed we will get\n # reproducible results when given the same seed.\n seed = np.random.randint(2**32)\n\n # Each thread is reserved a block of correlation IDs or size\n # CORRELATION_ID_BLOCK_SIZE\n correlation_id_base = 1 + (idx * CORRELATION_ID_BLOCK_SIZE)\n\n threads.append(\n threading.Thread(target=stress_thread,\n args=(thread_name, seed, FLAGS.iterations,\n correlation_id_base, trial, model_name,\n dtype)))\n\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n check_status(model_name)\n\n _thread_exceptions_mutex.acquire()\n try:\n if len(_thread_exceptions) > 0:\n for ex in _thread_exceptions:\n print(\"*********\\n{}\".format(ex))\n sys.exit(1)\n finally:\n _thread_exceptions_mutex.release()\n\n print(\"Exiting stress test\")\n sys.exit(0)\n" ]
[ [ "numpy.random.seed", "numpy.dtype", "numpy.full", "numpy.random.RandomState", "numpy.random.randint" ] ]
lstorchi/mrkvtheilcont
[ "d4215073a57b78d2b1ff424615e481ced4d3e7d5" ]
[ "markovc.py" ]
[ "import numpy.linalg\nimport numpy.random\nimport scipy.stats\nimport scipy.io\nimport argparse\nimport numpy\nimport math\nimport sys\nimport os\n\nimport os.path\n\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\n\nsys.path.append(\"./module\")\nimport mainmkvcmp\nimport basicutils\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-m\",\"--rmat-filename\", help=\"Transition probability matrix filename\", \\\n type=str, required=True, dest=\"rmatfilename\")\nparser.add_argument(\"-b\", \"--imat-filename\", help=\"Rewards matrix filename\", \\\n type=str, required=True, dest=\"imatfilename\")\nparser.add_argument(\"-s\", \"--step\", help=\"Bin width \", \\\n type=float, required=False, default=0.25, dest=\"step\")\nparser.add_argument(\"-t\", \"--time-prev\", help=\"Forecasted period \", \\\n type=int, required=False, default=37, dest=\"tprev\")\nparser.add_argument(\"-n\", \"--max-run\", help=\"Monte carlo iterations \", \\\n type=int, required=True, dest=\"maxrun\")\nparser.add_argument(\"-M\", \"--name-of-matrix\", help=\"Name of the probability matrix \", \\\n type=str, required=False, default=\"ms\", dest=\"nameofmatrix\")\nparser.add_argument(\"-B\", \"--name-of-bpmatrix\", help=\"Name of the rewards matrix \", \\\n type=str, required=False, default=\"i_r\", dest=\"nameofbpmatrix\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\", \\\n default=False, action=\"store_true\")\nparser.add_argument(\"-S\", \"--seed\", help=\"Using a seed for the random generator\", \\\n default=False, action=\"store_true\", dest=\"seed\")\n\nif len(sys.argv) == 1:\n parser.print_help()\n exit(1)\n\nargs = parser.parse_args()\n\nnamebp = args.nameofbpmatrix\nverbose = args.verbose\nfilename1 = args.rmatfilename\nfilename2 = args.imatfilename\nstep = args.step\ntprev = args.tprev\nnumofrun = args.maxrun\nnamems = args.nameofmatrix\n\nerrmsg = []\n\nif not (os.path.isfile(filename1)):\n print(\"File \" + filename1 + \" does not exist \")\n exit(1)\n\nif not (os.path.isfile(filename2)):\n print(\"File \" + filename2 + \" does not exist \")\n exit(1)\n\nmsd = scipy.io.loadmat(filename1)\nbpd = scipy.io.loadmat(filename2)\n\nif not(namems in msd.keys()):\n print (\"Cannot find \" + namems + \" in \" + filename1)\n print (msd.keys())\n exit(1)\n\nif not(namebp in bpd.keys()):\n print (\"Cannot find \" + namebp + \" in \" + filename2)\n print (bpd.keys())\n exit(1)\n\nif msd[namems].shape[0] != bpd[namebp].shape[0]:\n print (\"wrong dim of the input matrix\")\n exit(1)\n\nms = msd[namems]\ni_r = bpd[namebp]\n\nentropia = numpy.zeros(tprev, dtype='float64')\nvar = numpy.zeros((tprev), dtype='float64')\n\nrating = numpy.max(ms)\n\nmeanval = []\nstdeval = []\n \nallratings = []\nallratingsnins = []\n\ntime = ms.shape[1]\npr = numpy.zeros((rating,rating,time), dtype='float64')\nif not mainmkvcmp.main_mkc_comp_cont (ms, i_r, step, tprev, \\\n numofrun, verbose, True, args.seed, errmsg, entropia, \\\n var, allratings, allratingsnins, pr, meanval, stdeval):\n for m in errmsg:\n print (m)\n exit(1)\n" ]
[ [ "numpy.max", "numpy.zeros" ] ]
Alan-Du/Commodity_Tracker
[ "fc7d4c92535424be7dc82f47dd513332b4b772c9" ]
[ "Spread_Strat/HC_Spread.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 23 07:34:38 2020\r\n\r\n@author: shaolun du\r\n@contact: [email protected]\r\n\"\"\"\r\n\r\nimport DB.dbFetch as dbFetch\r\nimport pandas as pd\r\nimport datetime as dt\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\n\r\ndef gen_HC_spread(start,end):\r\n #generate bean and meal and oil comparison\r\n myFmt = mdates.DateFormatter('%y/%m') # Dates format\r\n Ticker_1 = \"HC\"\r\n Ticker_2 = \"RB\"\r\n #Get raw market data\r\n index_data = pd.DataFrame(dbFetch.get_index_all()).set_index(\"Dates\")\r\n inv_df = pd.DataFrame(dbFetch.get_historical_inventory()).set_index(\"Dates\")\r\n \r\n market_1 = index_data[index_data[\"name\"]==Ticker_1][start:end]\r\n market_1[\"mid_price\"] = 0.5*(market_1[\"open\"]+market_1[\"close\"])\r\n market_1[\"mid_price\"] = market_1[\"mid_price\"]/market_1[\"mid_price\"][0]\r\n market_1 = market_1[[\"mid_price\",\"r1\"]]\r\n Inventory = inv_df.loc[inv_df[\"Product\"].str.upper()==Ticker_1][start:end][\"INV\"]\r\n market_1 = market_1.join( Inventory, how=\"left\" ).fillna(0)\r\n \r\n \r\n market_2 = index_data[index_data[\"name\"]==Ticker_2][start:end]\r\n market_2[\"mid_price\"] = 0.5*(market_2[\"open\"]+market_2[\"close\"])\r\n market_2[\"mid_price\"] = market_2[\"mid_price\"]/market_2[\"mid_price\"][0]\r\n market_2 = market_2[[\"mid_price\",\"r1\"]]\r\n Inventory = inv_df.loc[inv_df[\"Product\"].str.upper()==Ticker_2][start:end][\"INV\"]\r\n market_2 = market_2.join(Inventory,how=\"left\").fillna(0)\r\n \r\n # Start plotting\r\n #Fig1: Bean and Bean Meal spread plot\r\n #Subplot 1,2: spread and spread distribution\r\n spread_name = Ticker_1+\"_\"+Ticker_2+\"_Spread\"\r\n fig, axes = plt.subplots(nrows=5, ncols=2,figsize=(10,15)) \r\n market_1[spread_name] = market_1[\"mid_price\"]-market_2[\"mid_price\"]\r\n axes[0,0].plot(market_1[spread_name],color='C0', label=Ticker_1+\"-\"+Ticker_2)\r\n axes[0,0].legend()\r\n axes[0,1].hist(market_1[spread_name],bins=50,color='C1', label=spread_name)\r\n axes[0,1].axvline(market_1[spread_name][-1], color='k', linestyle='dashed', linewidth=3)\r\n bottom, top = axes[0,1].get_ylim()\r\n pct_rank = market_1[spread_name].sort_values().values.searchsorted(market_1[spread_name][-1])/len(market_1[spread_name])\r\n axes[0,1].text(market_1[spread_name][-1], top*0.9, 'Current:{:.1},\\nPct:{:.1%}'.format(market_1[spread_name][-1],pct_rank))\r\n \r\n #Subplot 3,4: single commodity price and roll yield\r\n axes[1,0].plot(market_1[\"mid_price\"],color='C0', label=Ticker_1)\r\n ax2 = axes[1,0].twinx()\r\n ax2.bar(market_1.index,market_1[\"r1\"],alpha=0.5,width=3,color='C4', label=\"RollYield\")\r\n axes[1,0].xaxis.set_major_formatter(myFmt)\r\n axes[1,0].legend()\r\n R1 = market_1[\"r1\"].dropna()\r\n pct_rank = R1.sort_values().values.searchsorted(R1[-1])/len(R1)\r\n axes[1,1].hist(R1,bins=50,color='C4',alpha=0.65)\r\n axes[1,1].axvline(R1[-1], color='k', linestyle='dashed', linewidth=3)\r\n bottom, top = axes[1,1].get_ylim()\r\n axes[1,1].text(R1[-1]*1.1, top*0.9, 'Current:{:.0%},\\nPct:{:.1%}'.format(R1[-1],pct_rank))\r\n \r\n axes[2,0].plot(market_2[\"mid_price\"],color='C0', label=Ticker_2)\r\n ax2 = axes[2,0].twinx()\r\n ax2.bar(market_2.index,market_2[\"r1\"],alpha=0.5,width=3,color='C4', label=\"RollYield\")\r\n axes[2,0].xaxis.set_major_formatter(myFmt)\r\n axes[2,0].legend()\r\n R1 = market_2[\"r1\"].dropna()\r\n pct_rank = R1.sort_values().values.searchsorted(R1[-1])/len(R1)\r\n axes[2,1].hist(R1,bins=50,color='C4',alpha=0.65)\r\n 
axes[2,1].axvline(R1[-1], color='k', linestyle='dashed', linewidth=3)\r\n bottom, top = axes[2,1].get_ylim()\r\n axes[2,1].text(R1[-1]*1.1, top*0.9, 'Current:{:.0%},\\nPct:{:.1%}'.format(R1[-1],pct_rank))\r\n \r\n axes[3,0].bar(market_1.index,market_1[\"INV\"],alpha=0.5,width=3,color='C4', label=Ticker_1+\"_INV\")\r\n axes[3,0].legend()\r\n pct_rank = market_1[\"INV\"].sort_values().values.searchsorted(market_1[\"INV\"][-1])/len(market_1[\"INV\"])\r\n axes[3,1].hist(market_1[\"INV\"],bins=50,color='C3',alpha=0.65)\r\n axes[3,1].axvline(market_1[\"INV\"][-1], color='k', linestyle='dashed', linewidth=3)\r\n bottom, top = axes[3,1].get_ylim()\r\n axes[3,1].text(market_1[\"INV\"][-1]*1.1, top*0.9, 'Current:{:,},\\nPct:{:.1%}'.format(market_1[\"INV\"][-1],pct_rank))\r\n \r\n axes[4,0].bar(market_2.index,market_2[\"INV\"],alpha=0.5,width=3,color='C4', label=Ticker_2+\"_INV\")\r\n axes[4,0].legend()\r\n pct_rank = market_2[\"INV\"].sort_values().values.searchsorted(market_2[\"INV\"][-1])/len(market_2[\"INV\"])\r\n axes[4,1].hist(market_2[\"INV\"],bins=50,color='C3',alpha=0.65)\r\n axes[4,1].axvline(market_2[\"INV\"][-1], color='k', linestyle='dashed', linewidth=3)\r\n bottom, top = axes[4,1].get_ylim()\r\n axes[4,1].text(market_2[\"INV\"][-1]*1.1, top*0.9, 'Current:{:,},\\nPct:{:.1%}'.format(market_2[\"INV\"][-1],pct_rank))\r\n \r\n fig.suptitle(Ticker_1+\"/\"+Ticker_2+\" Spread Strat\",y=0.9)\r\n return fig" ]
[ [ "matplotlib.dates.DateFormatter", "matplotlib.pyplot.subplots" ] ]
dkp-1024/my_machine_learning
[ "11ace64c7ae5c709f20cb6691529768b42d08d22" ]
[ "numpy_practice/a5_arrayConcatination.py" ]
[ "import numpy as np \n\n#You can concatenate two or more arrays at once.\nx = np.array([1, 2, 3])\ny = np.array([3, 2, 1])\nz = [21,21,21]\nprint(np.concatenate([x, y,z]))\n# array([ 1, 2, 3, 3, 2, 1, 21, 21, 21])\n\n#You can also use this function to create 2-dimensional arrays.\ngrid = np.array([[1,2,3],[4,5,6]])\nprint(np.concatenate([grid,grid]))\n# array([[1, 2, 3],\n# [4, 5, 6],\n# [1, 2, 3],\n# [4, 5, 6]])\n\n#Using its axis parameter, you can define row-wise or column-wise matrix\nprint(np.concatenate([grid,grid],axis=1))\n# array([[1, 2, 3, 1, 2, 3],\n# [4, 5, 6, 4, 5, 6]])\n\n# ............COMBINING 2D ARRAY WITH 1D ARRAY..................#\nx = np.array([3,4,5])\ngrid = np.array([[1,2,3],[17,18,19]])\nprint(np.vstack([x,grid]))\n# array([[ 3, 4, 5],\n# [ 1, 2, 3],\n# [17, 18, 19]])\n\n#Similarly, you can add an array using np.hstack\nz = np.array([[9],[9]])\nprint(np.hstack([grid,z]))\n# array([[ 1, 2, 3, 9],\n# [17, 18, 19, 9]])\n\n# split the arrays based on pre-defined positions\nx = np.arange(10)\nprint(x)\n# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\nx1,x2,x3 = np.split(x,[3,6])\nprint(x1,x2,x3)\n# [0 1 2] [3 4 5] [6 7 8 9]\n\ngrid = np.arange(16).reshape((4,4))\ngrid\nupper,lower = np.vsplit(grid,[2])\nprint (\"upper part \", upper, '\\n', \"lower part\", lower)\n# (array([[0, 1, 2, 3],\n# [4, 5, 6, 7]]), array([[ 8, 9, 10, 11],\n# [12, 13, 14, 15]]))\n\n# YOU CAN DO VARIOUS OPERATIONS ON THE ARRAYS AS WELL\n\n\n\n\n\n\n\n" ]
[ [ "numpy.hstack", "numpy.split", "numpy.arange", "numpy.vsplit", "numpy.concatenate", "numpy.array", "numpy.vstack" ] ]
Panesher/Machine-Learning
[ "dcc46c7816af209c5f031feb84a0eb3b2b5d9515" ]
[ "ML/HW-5/hw5code.py" ]
[ "import numpy as np\nfrom collections import Counter\nfrom sklearn.base import BaseEstimator, ClassifierMixin\n\ndef find_best_split(feature_vector, target_vector):\n \"\"\"\n Под критерием Джини здесь подразумевается следующая функция:\n $$Q(R) = -\\frac {|R_l|}{|R|}H(R_l) -\\frac {|R_r|}{|R|}H(R_r)$$,\n $R$ — множество объектов, $R_l$ и $R_r$ — объекты, попавшие в левое и правое поддерево,\n $H(R) = 1-p_1^2-p_0^2$, $p_1$, $p_0$ — доля объектов класса 1 и 0 соответственно.\n\n Указания:\n * Пороги, приводящие к попаданию в одно из поддеревьев пустого множества объектов, не рассматриваются.\n * В качестве порогов, нужно брать среднее двух сосдених (при сортировке) значений признака\n * Поведение функции в случае константного признака может быть любым.\n * При одинаковых приростах Джини нужно выбирать минимальный сплит.\n * За наличие в функции циклов балл будет снижен. Векторизуйте! :)\n\n :param feature_vector: вещественнозначный вектор значений признака\n :param target_vector: вектор классов объектов, len(feature_vector) == len(target_vector)\n\n :return thresholds: отсортированный по возрастанию вектор со всеми возможными порогами, по которым объекты можно\n разделить на две различные подвыборки, или поддерева\n :return ginis: вектор со значениями критерия Джини для каждого из порогов в thresholds len(ginis) == len(thresholds)\n :return threshold_best: оптимальный порог (число)\n :return gini_best: оптимальное значение критерия Джини (число)\n \"\"\"\n fv, tv = np.array(feature_vector), np.array(target_vector)\n if np.unique(fv).shape[0] < 2:\n return np.zeros(1), np.zeros(1), 0, 0\n \n idx = np.argsort(fv)\n feat_vec_sorted = fv[idx]\n target_sorted = tv[idx]\n elements_not_equal = feat_vec_sorted[1:] != feat_vec_sorted[:-1]\n\n thresholds = ((feat_vec_sorted[1:] + feat_vec_sorted[:-1]) / 2)[elements_not_equal]\n \n cnt_l = np.arange(1, feat_vec_sorted.shape[0])[elements_not_equal]\n cnt_r = feat_vec_sorted.shape[0] - cnt_l\n target_prefix = np.cumsum(target_sorted)[:-1][elements_not_equal] / cnt_l\n target_z = (np.cumsum(target_sorted[::-1][:-1]) / np.arange(\n 1, feat_vec_sorted.shape[0]))[::-1][elements_not_equal]\n H_r = 1 - (1 - target_z) ** 2 - target_z ** 2\n H_l = 1 - (1 - target_prefix) ** 2 - target_prefix ** 2\n \n Q = -(cnt_l * H_l + cnt_r * H_r) / fv.shape[0]\n mx = Q.argmax()\n \n return thresholds, Q, thresholds[mx], Q[mx]\n\n\nclass DecisionTree(BaseEstimator, ClassifierMixin):\n def __init__(self, feature_types, max_depth=None, min_samples_split=None, min_samples_leaf=None):\n if np.any(list(map(lambda x: x != \"real\" and x != \"categorical\", feature_types))):\n raise ValueError(\"There is unknown feature type\")\n\n self._tree = {}\n self._feature_types = feature_types\n self._max_depth = max_depth\n self._min_samples_split = min_samples_split\n self._min_samples_leaf = min_samples_leaf\n \n # https://github.com/scikit-learn/scikit-learn/blob/0d378913b/sklearn/base.py#L188\n def get_params(self, deep=True):\n out = dict()\n for key in self._get_param_names():\n value = getattr(self, '_' + key)\n if deep and hasattr(value, \"get_params\"):\n deep_items = value.get_params().items()\n out.update((key + \"__\" + k, val) for k, val in deep_items)\n out[key] = value\n return out\n \n def _is_threshold_correct_by_leaf(self, x, threshold):\n if self._min_samples_split is None:\n return True\n \n if x[x > threshold].shape[0] > self._min_samples_split and x[x < threshold].shape[0] > self._min_samples_split:\n return True\n \n return False\n \n def 
_find_correct_split_leaf(self, x, thresholds, ginis, threshold, gini):\n if self._is_threshold_correct_by_leaf(x, threshold):\n return threshold, gini\n\n best_gini = -np.inf\n best_threshold = None\n\n for threshold, gini in zip(thresholds, ginis):\n if gini > best_gini and self._is_threshold_correct_by_leaf(x, threshold):\n best_gini = gini\n best_threshold = threshold\n\n return best_threshold, best_gini\n \n def _check_stop_condition(self, depth, samples):\n if self._max_depth is not None and depth >= self._max_depth:\n return True\n if self._min_samples_split is not None and samples < self._min_samples_split:\n return True\n if self._min_samples_split is not None and samples // 2 < self._min_samples_leaf:\n return True\n \n return False\n\n def _fit_node(self, sub_X, sub_y, node, depth=0):\n if np.all(sub_y == sub_y[0]):\n node[\"type\"] = \"terminal\"\n node[\"class\"] = sub_y[0]\n return\n \n if self._check_stop_condition(depth, sub_y.shape[0]):\n node[\"type\"] = \"terminal\"\n node[\"class\"] = Counter(sub_y).most_common(1)[0][0]\n return\n\n feature_best, threshold_best, gini_best, split = None, None, None, None\n for feature in range(sub_X.shape[1]):\n feature_type = self._feature_types[feature]\n categories_map = {}\n\n if feature_type == \"real\":\n feature_vector = sub_X[:, feature]\n elif feature_type == \"categorical\":\n counts = Counter(sub_X[:, feature])\n clicks = Counter(sub_X[sub_y == 1, feature])\n ratio = {}\n for key, current_count in counts.items():\n if key in clicks:\n current_click = clicks[key]\n else:\n current_click = 0\n ratio[key] = current_click / current_count\n sorted_categories = list(map(lambda x: x[0], sorted(ratio.items(), key=lambda x: x[1])))\n categories_map = dict(zip(sorted_categories, list(range(len(sorted_categories)))))\n\n feature_vector = np.array(list(map(lambda x: categories_map[x], sub_X[:, feature])))\n else:\n raise ValueError\n\n if np.unique(feature_vector).shape[0] < 2:\n continue\n\n thresholds, ginis, threshold, gini = find_best_split(feature_vector, sub_y)\n threshold, gini = self._find_correct_split_leaf(feature_vector, thresholds, ginis, threshold, gini)\n if threshold is None:\n node[\"type\"] = \"terminal\"\n node[\"class\"] = Counter(sub_y).most_common(1)[0][0]\n return\n \n if gini_best is None or gini > gini_best:\n feature_best = feature\n gini_best = gini\n split = feature_vector < threshold\n\n if feature_type == \"real\":\n threshold_best = threshold\n elif feature_type == \"categorical\":\n threshold_best = list(map(lambda x: x[0],\n filter(lambda x: x[1] < threshold, categories_map.items())))\n else:\n raise ValueError\n\n if feature_best is None:\n node[\"type\"] = \"terminal\"\n node[\"class\"] = Counter(sub_y).most_common(1)[0][0]\n return\n\n node[\"type\"] = \"nonterminal\"\n\n node[\"feature_split\"] = feature_best\n if self._feature_types[feature_best] == \"real\":\n node[\"threshold\"] = threshold_best\n elif self._feature_types[feature_best] == \"categorical\":\n node[\"categories_split\"] = threshold_best\n else:\n raise ValueError\n node[\"left_child\"], node[\"right_child\"] = {}, {}\n self._fit_node(sub_X[split], sub_y[split], node[\"left_child\"], depth + 1)\n self._fit_node(sub_X[np.logical_not(split)], sub_y[np.logical_not(split)], node[\"right_child\"], depth + 1)\n\n def _predict_node(self, x, node):\n if node['type'] == 'terminal':\n return node['class']\n \n feature_split = self._feature_types[node['feature_split']]\n if feature_split == 'categorical':\n child = node['left_child'] if 
x[node['feature_split']] in node['categories_split'] else node['right_child']\n elif feature_split == 'real':\n child = node['left_child'] if x[node['feature_split']] < node['threshold'] else node['right_child']\n else:\n raise ValueError\n \n return self._predict_node(x, child)\n\n def fit(self, X, y):\n self._fit_node(np.array(X), np.array(y), self._tree)\n\n def predict(self, X_):\n predicted = []\n X = np.array(X_)\n for x in X:\n predicted.append(self._predict_node(x, self._tree))\n return np.array(predicted)\n" ]
[ [ "numpy.logical_not", "numpy.unique", "numpy.arange", "numpy.cumsum", "numpy.all", "numpy.argsort", "numpy.array", "numpy.zeros" ] ]
gauravsdeshmukh/PlugFlowCrystallizer
[ "684e80438ef2912fab60201f305f7c288d4b5274" ]
[ "PlugFlowCrystallizer_Input.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 27 17:33:26 2018\r\n\r\n@author: Gaurav\r\n\"\"\"\r\n\r\nimport scipy as sci\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom PlugFlowCrystallizer import *\r\n\r\n###############################################################################\r\n###########################USER INPUT BEGINS###################################\r\n############################DEFINE SPATIAL AND TEMPORAL PARAMETERS#############\r\nlength=10\r\nbreadth=0.4\r\ncolpts=150\r\nrowpts=40\r\ntime=100\r\ncrystal_timedelay=20 #The time after which crystals are introduced in the system\r\n###############################MISC############################################\r\nCFL_number=0.1 #Do not touch this unless solution diverges\r\nfile_flag=1 #Keep 1 to print results to file\r\nfile_interval=50 #Regular time interval after which files are written\r\nplot_flag=1 #Keep 1 to plot results at the end\r\n###########################DEFINE PHYSICAL PARAMETERS##########################\r\nrho=1\r\nmu=0.01\r\nk=0.6\r\ncp=41.8\r\nD=1.5e-02\r\n###########################DEFINE CRYSTAL PARAMETERS###########################\r\nMW=101.1032\r\nkg=1.1612*10**(-4)\r\ng=1.32\r\nrhoc=1400\r\ndH=-50\r\n##########################DEFINE INITIAL MOMENTUM PARAMETERS###################\r\nu_in=2\r\nv_wall=0\r\np_out=0\r\n##########################DEFINE INITIAL TEMPERATURE PARAMETERS###############\r\nh=4000\r\nT_jacket=303\r\nT_in=363\r\nT_init=293\r\n##########################DEFINE INITIAL CONCENTRATION CONDITIONS##############\r\nC_init=0\r\nC_in=150\r\n#########################DEFINE INITIAL CRYSTAL CONDITIONS#####################\r\nN_init=1000\r\nL_cryst=1e-3\r\nL_init=N_init*L_cryst\r\n\r\n###############################################################################\r\n########################CREATE SPACE OBJECT####################################\r\npfc=Space()\r\npfc.CreateMesh(rowpts,colpts)\r\npfc.SetDeltas(breadth,length)\r\nwater=Fluid(rho,mu,k,cp,D)\r\n\r\n###############################################################################\r\n#########################BOUNDARY DEFINITIONS##################################\r\n########################HEAT CONVECTION BOUNDARY###############################\r\njacketvaluetop=1/(1-(h*pfc.dy/water.k))\r\njacketconstanttop=(h*pfc.dy/water.k)/(1-(h*pfc.dy/water.k))*T_jacket\r\njacketvaluebottom=1/(1+(h*pfc.dy/water.k))\r\njacketconstantbottom=(h*pfc.dy/water.k)/(1+(h*pfc.dy/water.k))*T_jacket\r\n########################CREATE BOUNDARY OBJECTS################################\r\n###########################VELOCITY############################################\r\nflow=Boundary(\"D\",u_in)\r\nnoslip=Boundary(\"D\",v_wall)\r\nzeroflux=Boundary(\"N\",0)\r\n############################PRESSURE###########################################\r\npressureatm=Boundary(\"D\",p_out)\r\n############################TEMPERATURE########################################\r\njackettop=Boundary(\"C\",jacketvaluetop,jacketconstanttop)\r\njacketbottom=Boundary(\"C\",jacketvaluebottom,jacketconstantbottom)\r\nintemp=Boundary(\"D\",T_in)\r\n############################CONCENTRATION######################################\r\ninconc=Boundary(\"D\",C_in)\r\n\r\n###############################################################################\r\n#########################CREATE CRYSTAL OBJECT#################################\r\nkno3=Crystal(\"Potassium 
nitrate\",pfc)\r\nkno3.SetConstants(MW,kg,g,rhoc,dH)\r\n\r\n###############################################################################\r\n#########################SET INITIAL CONDITIONS IN DOMAIN######################\r\nSetInitialT(pfc,T_init)\r\nSetInitialC(pfc,C_init)\r\nSetInitialNLAV(kno3,N_init,L_init)\r\n\r\n#######################USER INPUT ENDS#########################################\r\n###############################################################################\r\n#############################INITIALIZATION####################################\r\nt=0\r\ni=0\r\n############################THE RUN############################################\r\nwhile(t<time):\r\n CFL=CFL_number\r\n SetTimeStep(CFL,pfc,water)\r\n timestep=pfc.dt\r\n \r\n SetUBoundary(pfc,flow,zeroflux,noslip,noslip)\r\n SetVBoundary(pfc,zeroflux,zeroflux,noslip,noslip)\r\n SetPBoundary(pfc,zeroflux,pressureatm,zeroflux,zeroflux)\r\n SetTBoundary(pfc,intemp,zeroflux,jackettop,jacketbottom)\r\n SetCBoundary(pfc,inconc,zeroflux,zeroflux,zeroflux)\r\n \r\n GetStarredVelocities(pfc,water)\r\n SolvePressurePoisson(pfc,water,zeroflux,pressureatm,zeroflux,zeroflux)\r\n SolveMomentumEquation(pfc,water)\r\n AdjustUV(pfc)\r\n \r\n SolveEnergyEquation(pfc,kno3,water)\r\n AdjustT(pfc)\r\n \r\n SolveSpeciesEquation(pfc,kno3,water)\r\n AdjustC(pfc)\r\n \r\n if(t>crystal_timedelay):\r\n GetCstar(pfc,kno3)\r\n GetG(pfc,kno3)\r\n \r\n SolvePopulationBalanceEquation(pfc,kno3)\r\n AdjustNLAV(kno3)\r\n \r\n SetCentrePUVTCNLAV(pfc,kno3)\r\n \r\n if(t<40):\r\n file_flag=1\r\n else:\r\n file_flag=0\r\n \r\n if(file_flag==1):\r\n WriteToFile(pfc,kno3,i,file_interval)\r\n \r\n u=pfc.u\r\n v=pfc.v\r\n p=pfc.p\r\n T=pfc.T\r\n C=pfc.C\r\n G=kno3.G\r\n N=kno3.N\r\n L=kno3.L\r\n u_c=pfc.u_c\r\n v_c=pfc.v_c\r\n p_c=pfc.p_c\r\n T_c=pfc.T_c\r\n C_c=pfc.C_c\r\n N_c=kno3.N_c\r\n L_c=kno3.L_c\r\n A_c=kno3.A_c\r\n V_c=kno3.V_c\r\n t+=timestep\r\n i+=1\r\n #nan check\r\n if(sci.isnan(sci.sum(C))):\r\n break\r\n print(time-t)\r\n\r\n###########################END OF RUN##########################################\r\n###############################################################################\r\n#######################SET ARRAYS FOR PLOTTING#################################\r\nx=sci.linspace(0,length,colpts)\r\ny=sci.linspace(0,breadth,rowpts)\r\n[X,Y]=sci.meshgrid(x,y)\r\n\r\nu=pfc.u\r\nv=pfc.v\r\np=pfc.p\r\nT=pfc.T\r\nC=pfc.C\r\nG=kno3.G\r\nN=kno3.N\r\nL=kno3.L\r\nu_c=pfc.u_c\r\nv_c=pfc.v_c\r\np_c=pfc.p_c\r\nT_c=pfc.T_c\r\nC_c=pfc.C_c\r\nN_c=kno3.N_c\r\nL_c=kno3.L_c\r\n\r\n######################EXTRA PLOTTING CODE BELOW################################\r\nif(plot_flag==1):\r\n plt.figure(figsize=(16,8))\r\n plt.contourf(X,Y,p_c,cmap=cm.viridis)\r\n plt.colorbar()\r\n plt.quiver(X,Y,u_c,v_c)\r\n plt.title(\"Velocity and Pressure Plot\")\r\n \r\n plt.figure(figsize=(16,8))\r\n plt.contourf(X,Y,T_c,cmap=cm.inferno)\r\n plt.colorbar()\r\n plt.title(\"Temperature Plot\")\r\n \r\n plt.figure(figsize=(16,8))\r\n plt.contourf(X,Y,C_c,cmap=cm.viridis)\r\n plt.colorbar()\r\n plt.title(\"Concentration Plot\")\r\n \r\n plt.figure(figsize=(16,8))\r\n plt.contourf(X,Y,L_c/N_c,cmap=cm.viridis)\r\n plt.colorbar()\r\n plt.title(\"Crystal Length Plot\")\r\n\r\n\r\n\r\n\r\n###########################END OF FILE#########################################" ]
[ [ "scipy.linspace", "matplotlib.pyplot.contourf", "matplotlib.pyplot.title", "scipy.sum", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.quiver", "scipy.meshgrid", "matplotlib.pyplot.figure" ] ]
res-life/cudf
[ "94a5d4180b1281d4250e9f915e547789d8da3ce0" ]
[ "python/cudf/cudf/tests/test_stats.py" ]
[ "# Copyright (c) 2018-2022, NVIDIA CORPORATION.\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport cupy as cp\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.datasets import randomdata\nfrom cudf.testing._utils import assert_eq, assert_exceptions_equal\n\nparams_dtypes = [np.int32, np.uint32, np.float32, np.float64]\nmethods = [\"min\", \"max\", \"sum\", \"mean\", \"var\", \"std\"]\n\ninterpolation_methods = [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]\n\n\[email protected](\"method\", methods)\[email protected](\"dtype\", params_dtypes)\[email protected](\"skipna\", [True, False])\ndef test_series_reductions(method, dtype, skipna):\n np.random.seed(0)\n arr = np.random.random(100)\n if np.issubdtype(dtype, np.integer):\n arr *= 100\n mask = arr > 10\n else:\n mask = arr > 0.5\n\n arr = arr.astype(dtype)\n if dtype in (np.float32, np.float64):\n arr[[2, 5, 14, 19, 50, 70]] = np.nan\n sr = cudf.Series(arr)\n sr[~mask] = None\n psr = sr.to_pandas()\n psr[~mask] = np.nan\n\n def call_test(sr, skipna):\n fn = getattr(sr, method)\n if method in [\"std\", \"var\"]:\n return fn(ddof=1, skipna=skipna)\n else:\n return fn(skipna=skipna)\n\n expect, got = call_test(psr, skipna=skipna), call_test(sr, skipna=skipna)\n\n np.testing.assert_approx_equal(expect, got)\n\n\[email protected](\"method\", methods)\ndef test_series_reductions_concurrency(method):\n e = ThreadPoolExecutor(10)\n\n np.random.seed(0)\n srs = [cudf.Series(np.random.random(10000)) for _ in range(1)]\n\n def call_test(sr):\n fn = getattr(sr, method)\n if method in [\"std\", \"var\"]:\n return fn(ddof=1)\n else:\n return fn()\n\n def f(sr):\n return call_test(sr + 1)\n\n list(e.map(f, srs * 50))\n\n\[email protected](\"ddof\", range(3))\ndef test_series_std(ddof):\n np.random.seed(0)\n arr = np.random.random(100) - 0.5\n sr = cudf.Series(arr)\n pd = sr.to_pandas()\n got = sr.std(ddof=ddof)\n expect = pd.std(ddof=ddof)\n np.testing.assert_approx_equal(expect, got)\n\n\ndef test_series_unique():\n for size in [10**x for x in range(5)]:\n arr = np.random.randint(low=-1, high=10, size=size)\n mask = arr != -1\n sr = cudf.Series(arr)\n sr[~mask] = None\n assert set(arr[mask]) == set(sr.unique().dropna().to_numpy())\n assert len(set(arr[mask])) == sr.nunique()\n\n\[email protected](\n \"nan_as_null, dropna\",\n [(True, True), (True, False), (False, True), (False, False)],\n)\ndef test_series_nunique(nan_as_null, dropna):\n # We remove nulls as opposed to NaNs using the dropna parameter,\n # so to test against pandas we replace NaN with another discrete value\n cudf_series = cudf.Series([1, 2, 2, 3, 3], nan_as_null=nan_as_null)\n pd_series = pd.Series([1, 2, 2, 3, 3])\n expect = pd_series.nunique(dropna=dropna)\n got = cudf_series.nunique(dropna=dropna)\n assert expect == got\n\n cudf_series = cudf.Series(\n [1.0, 2.0, 3.0, np.nan, None], nan_as_null=nan_as_null\n )\n if nan_as_null is True:\n pd_series = pd.Series([1.0, 2.0, 3.0, np.nan, None])\n else:\n pd_series = pd.Series([1.0, 2.0, 3.0, -1.0, None])\n\n expect = pd_series.nunique(dropna=dropna)\n got = cudf_series.nunique(dropna=dropna)\n assert expect == got\n\n cudf_series = cudf.Series([1.0, np.nan, np.nan], nan_as_null=nan_as_null)\n if nan_as_null is True:\n pd_series = pd.Series([1.0, np.nan, np.nan])\n else:\n pd_series = pd.Series([1.0, -1.0, -1.0])\n expect = pd_series.nunique(dropna=dropna)\n got = cudf_series.nunique(dropna=dropna)\n assert expect == got\n\n\ndef test_series_scale():\n arr = 
pd.Series(np.random.randint(low=-10, high=10, size=100))\n sr = cudf.Series(arr)\n\n vmin = arr.min()\n vmax = arr.max()\n scaled = (arr - vmin) / (vmax - vmin)\n assert scaled.min() == 0\n assert scaled.max() == 1\n assert_eq(sr.scale(), scaled)\n\n\[email protected](\"int_method\", interpolation_methods)\ndef test_exact_quantiles(int_method):\n arr = np.asarray([6.8, 0.15, 3.4, 4.17, 2.13, 1.11, -1.01, 0.8, 5.7])\n quant_values = [0.0, 0.25, 0.33, 0.5, 1.0]\n\n df = pd.DataFrame(arr)\n gdf_series = cudf.Series(arr)\n\n q1 = gdf_series.quantile(\n quant_values, interpolation=int_method, exact=True\n )\n\n q2 = df.quantile(quant_values, interpolation=int_method)\n\n np.testing.assert_allclose(\n q1.to_pandas().values, np.array(q2.values).T.flatten(), rtol=1e-10\n )\n\n\[email protected](\"int_method\", interpolation_methods)\ndef test_exact_quantiles_int(int_method):\n arr = np.asarray([7, 0, 3, 4, 2, 1, -1, 1, 6])\n quant_values = [0.0, 0.25, 0.33, 0.5, 1.0]\n\n df = pd.DataFrame(arr)\n gdf_series = cudf.Series(arr)\n\n q1 = gdf_series.quantile(\n quant_values, interpolation=int_method, exact=True\n )\n\n q2 = df.quantile(quant_values, interpolation=int_method)\n\n np.testing.assert_allclose(\n q1.to_pandas().values, np.array(q2.values).T.flatten(), rtol=1e-10\n )\n\n\ndef test_approx_quantiles():\n\n arr = np.asarray([6.8, 0.15, 3.4, 4.17, 2.13, 1.11, -1.01, 0.8, 5.7])\n quant_values = [0.0, 0.25, 0.33, 0.5, 1.0]\n\n gdf_series = cudf.Series(arr)\n pdf_series = pd.Series(arr)\n\n q1 = gdf_series.quantile(quant_values, exact=False)\n q2 = pdf_series.quantile(quant_values)\n\n assert_eq(q1, q2)\n\n\ndef test_approx_quantiles_int():\n arr = np.asarray([1, 2, 3])\n quant_values = [0.5]\n approx_results = [2]\n\n gdf_series = cudf.Series(arr)\n\n q1 = gdf_series.quantile(quant_values, exact=False)\n\n assert approx_results == q1.to_pandas().values\n\n\[email protected](\"data\", [[], [1, 2, 3, 10, 326497]])\[email protected](\n \"q\",\n [\n [],\n 0.5,\n 1,\n 0.234,\n [0.345],\n [0.243, 0.5, 1],\n np.array([0.5, 1]),\n cp.array([0.5, 1]),\n ],\n)\ndef test_misc_quantiles(data, q):\n\n pdf_series = cudf.utils.utils._create_pandas_series(data=data)\n gdf_series = cudf.Series(data)\n\n expected = pdf_series.quantile(q.get() if isinstance(q, cp.ndarray) else q)\n actual = gdf_series.quantile(q)\n assert_eq(expected, actual)\n\n\[email protected](\n \"data\",\n [\n cudf.Series(np.random.normal(-100, 100, 1000)),\n cudf.Series(np.random.randint(-50, 50, 1000)),\n cudf.Series(np.zeros(100)),\n cudf.Series(np.repeat(np.nan, 100)),\n cudf.Series(np.array([1.123, 2.343, np.nan, 0.0])),\n cudf.Series(\n [5, 10, 53, None, np.nan, None, 12, 43, -423], nan_as_null=False\n ),\n cudf.Series([1.1032, 2.32, 43.4, 13, -312.0], index=[0, 4, 3, 19, 6]),\n cudf.Series([]),\n cudf.Series([-3]),\n ],\n)\[email protected](\"null_flag\", [False, True])\ndef test_kurtosis_series(data, null_flag):\n pdata = data.to_pandas()\n\n if null_flag and len(data) > 2:\n data.iloc[[0, 2]] = None\n pdata.iloc[[0, 2]] = None\n\n got = data.kurtosis()\n got = got if np.isscalar(got) else got.to_numpy()\n expected = pdata.kurtosis()\n np.testing.assert_array_almost_equal(got, expected)\n\n got = data.kurt()\n got = got if np.isscalar(got) else got.to_numpy()\n expected = pdata.kurt()\n np.testing.assert_array_almost_equal(got, expected)\n\n got = data.kurt(numeric_only=False)\n got = got if np.isscalar(got) else got.to_numpy()\n expected = pdata.kurt(numeric_only=False)\n np.testing.assert_array_almost_equal(got, expected)\n\n with 
pytest.raises(NotImplementedError):\n data.kurt(numeric_only=True)\n\n\[email protected](\n \"data\",\n [\n cudf.Series(np.random.normal(-100, 100, 1000)),\n cudf.Series(np.random.randint(-50, 50, 1000)),\n cudf.Series(np.zeros(100)),\n cudf.Series(np.repeat(np.nan, 100)),\n cudf.Series(np.array([1.123, 2.343, np.nan, 0.0])),\n cudf.Series(\n [5, 10, 53, None, np.nan, None, 12, 43, -423], nan_as_null=False\n ),\n cudf.Series([1.1032, 2.32, 43.4, 13, -312.0], index=[0, 4, 3, 19, 6]),\n cudf.Series([]),\n cudf.Series([-3]),\n ],\n)\[email protected](\"null_flag\", [False, True])\ndef test_skew_series(data, null_flag):\n pdata = data.to_pandas()\n\n if null_flag and len(data) > 2:\n data.iloc[[0, 2]] = None\n pdata.iloc[[0, 2]] = None\n\n got = data.skew()\n expected = pdata.skew()\n got = got if np.isscalar(got) else got.to_numpy()\n np.testing.assert_array_almost_equal(got, expected)\n\n got = data.skew(numeric_only=False)\n expected = pdata.skew(numeric_only=False)\n got = got if np.isscalar(got) else got.to_numpy()\n np.testing.assert_array_almost_equal(got, expected)\n\n with pytest.raises(NotImplementedError):\n data.skew(numeric_only=True)\n\n\[email protected](\"dtype\", params_dtypes)\[email protected](\"num_na\", [0, 1, 50, 99, 100])\ndef test_series_median(dtype, num_na):\n np.random.seed(0)\n arr = np.random.random(100)\n if np.issubdtype(dtype, np.integer):\n arr *= 100\n mask = np.arange(100) >= num_na\n\n arr = arr.astype(dtype)\n sr = cudf.Series(arr)\n sr[~mask] = None\n arr2 = arr[mask]\n ps = pd.Series(arr2, dtype=dtype)\n\n actual = sr.median(skipna=True)\n desired = ps.median(skipna=True)\n\n np.testing.assert_approx_equal(actual, desired)\n\n # only for float until integer null supported convert to pandas in cudf\n # eg. pd.Int64Dtype\n if np.issubdtype(dtype, np.floating):\n ps = sr.to_pandas()\n actual = sr.median(skipna=False)\n desired = ps.median(skipna=False)\n np.testing.assert_approx_equal(actual, desired)\n\n\[email protected](\n \"data\",\n [\n np.random.normal(-100, 100, 1000),\n np.random.randint(-50, 50, 1000),\n np.zeros(100),\n np.array([1.123, 2.343, np.nan, 0.0]),\n np.array([-2, 3.75, 6, None, None, None, -8.5, None, 4.2]),\n cudf.Series([]),\n cudf.Series([-3]),\n ],\n)\[email protected](\"periods\", range(-5, 5))\[email protected](\"fill_method\", [\"ffill\", \"bfill\", \"pad\", \"backfill\"])\ndef test_series_pct_change(data, periods, fill_method):\n cs = cudf.Series(data)\n ps = cs.to_pandas()\n\n if np.abs(periods) <= len(cs):\n got = cs.pct_change(periods=periods, fill_method=fill_method)\n expected = ps.pct_change(periods=periods, fill_method=fill_method)\n np.testing.assert_array_almost_equal(\n got.to_numpy(na_value=np.nan), expected\n )\n\n\[email protected](\n \"data1\",\n [\n np.random.normal(-100, 100, 1000),\n np.random.randint(-50, 50, 1000),\n np.zeros(100),\n np.repeat(np.nan, 100),\n np.array([1.123, 2.343, np.nan, 0.0]),\n cudf.Series([5, 10, 53, None, np.nan, None], nan_as_null=False),\n cudf.Series([1.1, 2.32, 43.4], index=[0, 4, 3]),\n cudf.Series([]),\n cudf.Series([-3]),\n ],\n)\[email protected](\n \"data2\",\n [\n np.random.normal(-100, 100, 1000),\n np.random.randint(-50, 50, 1000),\n np.zeros(100),\n np.repeat(np.nan, 100),\n np.array([1.123, 2.343, np.nan, 0.0]),\n cudf.Series([1.1, 2.32, 43.4], index=[0, 500, 4000]),\n cudf.Series([5]),\n ],\n)\ndef test_cov1d(data1, data2):\n gs1 = cudf.Series(data1)\n gs2 = cudf.Series(data2)\n\n ps1 = gs1.to_pandas()\n ps2 = gs2.to_pandas()\n\n got = gs1.cov(gs2)\n expected = 
ps1.cov(ps2)\n np.testing.assert_approx_equal(got, expected, significant=8)\n\n\[email protected](\n \"data1\",\n [\n np.random.normal(-100, 100, 1000),\n np.random.randint(-50, 50, 1000),\n np.zeros(100),\n np.repeat(np.nan, 100),\n np.array([1.123, 2.343, np.nan, 0.0]),\n cudf.Series([5, 10, 53, None, np.nan, None], nan_as_null=False),\n cudf.Series([1.1032, 2.32, 43.4], index=[0, 4, 3]),\n cudf.Series([]),\n cudf.Series([-3]),\n ],\n)\[email protected](\n \"data2\",\n [\n np.random.normal(-100, 100, 1000),\n np.random.randint(-50, 50, 1000),\n np.zeros(100),\n np.repeat(np.nan, 100),\n np.array([1.123, 2.343, np.nan, 0.0]),\n cudf.Series([1.1, 2.32, 43.4], index=[0, 500, 4000]),\n cudf.Series([5]),\n ],\n)\[email protected](\"method\", [\"spearman\", \"pearson\"])\ndef test_corr1d(data1, data2, method):\n if method == \"spearman\":\n # Pandas uses scipy.stats.spearmanr code-path\n pytest.importorskip(\"scipy\")\n\n gs1 = cudf.Series(data1)\n gs2 = cudf.Series(data2)\n\n ps1 = gs1.to_pandas()\n ps2 = gs2.to_pandas()\n\n got = gs1.corr(gs2, method)\n expected = ps1.corr(ps2, method)\n np.testing.assert_approx_equal(got, expected, significant=8)\n\n\[email protected](\"method\", [\"spearman\", \"pearson\"])\ndef test_df_corr(method):\n\n gdf = randomdata(100, {str(x): float for x in range(50)})\n pdf = gdf.to_pandas()\n got = gdf.corr(method)\n expected = pdf.corr(method)\n assert_eq(got, expected)\n\n\[email protected](\n \"data\",\n [\n [0.0, 1, 3, 6, np.NaN, 7, 5.0, np.nan, 5, 2, 3, -100],\n [np.nan] * 3,\n [1, 5, 3],\n [],\n ],\n)\[email protected](\n \"ops\",\n [\n \"mean\",\n \"min\",\n \"max\",\n \"sum\",\n \"product\",\n \"var\",\n \"std\",\n \"prod\",\n \"kurtosis\",\n \"skew\",\n \"any\",\n \"all\",\n \"cummin\",\n \"cummax\",\n \"cumsum\",\n \"cumprod\",\n ],\n)\[email protected](\"skipna\", [True, False, None])\ndef test_nans_stats(data, ops, skipna):\n psr = cudf.utils.utils._create_pandas_series(data=data)\n gsr = cudf.Series(data, nan_as_null=False)\n\n assert_eq(\n getattr(psr, ops)(skipna=skipna), getattr(gsr, ops)(skipna=skipna)\n )\n\n psr = cudf.utils.utils._create_pandas_series(data=data)\n gsr = cudf.Series(data, nan_as_null=False)\n # Since there is no concept of `nan_as_null` in pandas,\n # nulls will be returned in the operations. 
So only\n # testing for `skipna=True` when `nan_as_null=False`\n assert_eq(getattr(psr, ops)(skipna=True), getattr(gsr, ops)(skipna=True))\n\n\[email protected](\n \"data\",\n [\n [0.0, 1, 3, 6, np.NaN, 7, 5.0, np.nan, 5, 2, 3, -100],\n [np.nan] * 3,\n [1, 5, 3],\n ],\n)\[email protected](\"ops\", [\"sum\", \"product\", \"prod\"])\[email protected](\"skipna\", [True, False, None])\[email protected](\"min_count\", [-10, -1, 0, 1, 2, 3, 5, 10])\ndef test_min_count_ops(data, ops, skipna, min_count):\n psr = pd.Series(data)\n gsr = cudf.Series(data, nan_as_null=False)\n\n assert_eq(\n getattr(psr, ops)(skipna=skipna, min_count=min_count),\n getattr(gsr, ops)(skipna=skipna, min_count=min_count),\n )\n\n\[email protected](\n \"gsr\",\n [\n cudf.Series([1, 2, 3, 4], dtype=\"datetime64[ns]\"),\n cudf.Series([1, 2, 3, 4], dtype=\"timedelta64[ns]\"),\n ],\n)\ndef test_cov_corr_invalid_dtypes(gsr):\n psr = gsr.to_pandas()\n\n assert_exceptions_equal(\n lfunc=psr.corr,\n rfunc=gsr.corr,\n lfunc_args_and_kwargs=([psr],),\n rfunc_args_and_kwargs=([gsr],),\n compare_error_message=False,\n )\n\n assert_exceptions_equal(\n lfunc=psr.cov,\n rfunc=gsr.cov,\n lfunc_args_and_kwargs=([psr],),\n rfunc_args_and_kwargs=([gsr],),\n compare_error_message=False,\n )\n\n\[email protected](\n \"data\",\n [\n randomdata(\n nrows=1000, dtypes={\"a\": float, \"b\": int, \"c\": float, \"d\": str}\n ),\n ],\n)\[email protected](\"null_flag\", [False, True])\ndef test_kurtosis_df(data, null_flag):\n pdata = data.to_pandas()\n\n if null_flag and len(data) > 2:\n data.iloc[[0, 2]] = None\n pdata.iloc[[0, 2]] = None\n\n got = data.kurtosis()\n got = got if np.isscalar(got) else got.to_numpy()\n expected = pdata.kurtosis()\n np.testing.assert_array_almost_equal(got, expected)\n\n got = data.kurt()\n got = got if np.isscalar(got) else got.to_numpy()\n expected = pdata.kurt()\n np.testing.assert_array_almost_equal(got, expected)\n\n got = data.kurt(numeric_only=True)\n got = got if np.isscalar(got) else got.to_numpy()\n expected = pdata.kurt(numeric_only=True)\n np.testing.assert_array_almost_equal(got, expected)\n\n\[email protected](\n \"data\",\n [\n randomdata(\n nrows=1000, dtypes={\"a\": float, \"b\": int, \"c\": float, \"d\": str}\n ),\n ],\n)\[email protected](\"null_flag\", [False, True])\ndef test_skew_df(data, null_flag):\n pdata = data.to_pandas()\n\n if null_flag and len(data) > 2:\n data.iloc[[0, 2]] = None\n pdata.iloc[[0, 2]] = None\n\n got = data.skew()\n expected = pdata.skew()\n got = got if np.isscalar(got) else got.to_numpy()\n np.testing.assert_array_almost_equal(got, expected)\n\n got = data.skew(numeric_only=True)\n expected = pdata.skew(numeric_only=True)\n got = got if np.isscalar(got) else got.to_numpy()\n np.testing.assert_array_almost_equal(got, expected)\n" ]
[ [ "numpy.random.random", "numpy.testing.assert_approx_equal", "numpy.random.seed", "numpy.abs", "numpy.asarray", "numpy.arange", "numpy.issubdtype", "numpy.random.normal", "numpy.random.randint", "numpy.isscalar", "numpy.repeat", "numpy.array", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ] ]
arjun921/image-classification-for-dummies
[ "acefcd557699500d44902d17009da17820f289fc" ]
[ "retrain_serving.py" ]
[ "from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nfrom datetime import datetime\r\nimport hashlib\r\nimport os.path\r\nimport random\r\nimport re\r\nimport struct\r\nimport sys\r\nimport tarfile\r\n\r\nimport numpy as np\r\nfrom six.moves import urllib\r\nimport tensorflow as tf\r\n#import traceback\r\n\r\nfrom tensorflow.python.framework import graph_util\r\nfrom tensorflow.python.framework import tensor_shape\r\nfrom tensorflow.python.platform import gfile\r\nfrom tensorflow.python.util import compat\r\n#from tensorflow.python.saved_model import signature_constants\r\n\r\nFLAGS = None\r\n\r\n# These are all parameters that are tied to the particular model architecture\r\n# we're using for Inception v3. These include things like tensor names and their\r\n# sizes. If you want to adapt this script to work with another model, you will\r\n# need to update these to reflect the values in the network you're using.\r\n# pylint: disable=line-too-long\r\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\r\n# pylint: enable=line-too-long\r\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\r\nBOTTLENECK_TENSOR_SIZE = 2048\r\nMODEL_INPUT_WIDTH = 299\r\nMODEL_INPUT_HEIGHT = 299\r\nMODEL_INPUT_DEPTH = 3\r\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\r\nRESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'\r\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\r\n\r\n\r\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\r\n \"\"\"Builds a list of training images from the file system.\r\n\r\n Analyzes the sub folders in the image directory, splits them into stable\r\n training, testing, and validation sets, and returns a data structure\r\n describing the lists of images for each label and their paths.\r\n\r\n Args:\r\n image_dir: String path to a folder containing subfolders of images.\r\n testing_percentage: Integer percentage of the images to reserve for tests.\r\n validation_percentage: Integer percentage of images reserved for validation.\r\n\r\n Returns:\r\n A dictionary containing an entry for each label subfolder, with images split\r\n into training, testing, and validation sets within each label.\r\n \"\"\"\r\n if not gfile.Exists(image_dir):\r\n print(\"Image directory '\" + image_dir + \"' not found.\")\r\n return None\r\n result = {}\r\n sub_dirs = [x[0] for x in gfile.Walk(image_dir)]\r\n # The root directory comes first, so skip it.\r\n is_root_dir = True\r\n for sub_dir in sub_dirs:\r\n if is_root_dir:\r\n is_root_dir = False\r\n continue\r\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\r\n file_list = []\r\n dir_name = os.path.basename(sub_dir)\r\n if dir_name == image_dir:\r\n continue\r\n print(\"Looking for images in '\" + dir_name + \"'\")\r\n for extension in extensions:\r\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\r\n file_list.extend(gfile.Glob(file_glob))\r\n if not file_list:\r\n print('No files found')\r\n continue\r\n if len(file_list) < 20:\r\n print('WARNING: Folder has less than 20 images, which may cause issues.')\r\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\r\n print('WARNING: Folder {} has more than {} images. 
Some images will '\r\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\r\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\r\n training_images = []\r\n testing_images = []\r\n validation_images = []\r\n for file_name in file_list:\r\n base_name = os.path.basename(file_name)\r\n # We want to ignore anything after '_nohash_' in the file name when\r\n # deciding which set to put an image in, the data set creator has a way of\r\n # grouping photos that are close variations of each other. For example\r\n # this is used in the plant disease data set to group multiple pictures of\r\n # the same leaf.\r\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\r\n # This looks a bit magical, but we need to decide whether this file should\r\n # go into the training, testing, or validation sets, and we want to keep\r\n # existing files in the same set even if more files are subsequently\r\n # added.\r\n # To do that, we need a stable way of deciding based on just the file name\r\n # itself, so we do a hash of that and then use that to generate a\r\n # probability value that we use to assign it.\r\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\r\n percentage_hash = ((int(hash_name_hashed, 16) %\r\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\r\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\r\n if percentage_hash < validation_percentage:\r\n validation_images.append(base_name)\r\n elif percentage_hash < (testing_percentage + validation_percentage):\r\n testing_images.append(base_name)\r\n else:\r\n training_images.append(base_name)\r\n result[label_name] = {\r\n 'dir': dir_name,\r\n 'training': training_images,\r\n 'testing': testing_images,\r\n 'validation': validation_images,\r\n }\r\n return result\r\n\r\n\r\ndef get_image_path(image_lists, label_name, index, image_dir, category):\r\n \"\"\"\"Returns a path to an image for a label at the given index.\r\n\r\n Args:\r\n image_lists: Dictionary of training images for each label.\r\n label_name: Label string we want to get an image for.\r\n index: Int offset of the image we want. This will be moduloed by the\r\n available number of images for the label, so it can be arbitrarily large.\r\n image_dir: Root folder string of the subfolders containing the training\r\n images.\r\n category: Name string of set to pull images from - training, testing, or\r\n validation.\r\n\r\n Returns:\r\n File system path string to an image that meets the requested parameters.\r\n\r\n \"\"\"\r\n if label_name not in image_lists:\r\n tf.logging.fatal('Label does not exist %s.', label_name)\r\n label_lists = image_lists[label_name]\r\n if category not in label_lists:\r\n tf.logging.fatal('Category does not exist %s.', category)\r\n category_list = label_lists[category]\r\n if not category_list:\r\n tf.logging.fatal('Label %s has no images in the category %s.',\r\n label_name, category)\r\n mod_index = index % len(category_list)\r\n base_name = category_list[mod_index]\r\n sub_dir = label_lists['dir']\r\n full_path = os.path.join(image_dir, sub_dir, base_name)\r\n return full_path\r\n\r\n\r\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\r\n category):\r\n \"\"\"\"Returns a path to a bottleneck file for a label at the given index.\r\n\r\n Args:\r\n image_lists: Dictionary of training images for each label.\r\n label_name: Label string we want to get an image for.\r\n index: Integer offset of the image we want. 
This will be moduloed by the\r\n available number of images for the label, so it can be arbitrarily large.\r\n bottleneck_dir: Folder string holding cached files of bottleneck values.\r\n category: Name string of set to pull images from - training, testing, or\r\n validation.\r\n\r\n Returns:\r\n File system path string to an image that meets the requested parameters.\r\n \"\"\"\r\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\r\n category) + '.txt'\r\n\r\n\r\ndef create_inception_graph():\r\n \"\"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\r\n\r\n Returns:\r\n Graph holding the trained Inception network, and various tensors we'll be\r\n manipulating.\r\n \"\"\"\r\n with tf.Session() as sess:\r\n model_filename = os.path.join(\r\n FLAGS.model_dir, 'classify_image_graph_def.pb')\r\n with gfile.FastGFile(model_filename, 'rb') as f:\r\n graph_def = tf.GraphDef()\r\n graph_def.ParseFromString(f.read())\r\n bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (\r\n tf.import_graph_def(graph_def, name='', return_elements=[\r\n BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,\r\n RESIZED_INPUT_TENSOR_NAME]))\r\n return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor\r\n\r\n\r\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\r\n bottleneck_tensor):\r\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\r\n\r\n Args:\r\n sess: Current active TensorFlow Session.\r\n image_data: String of raw JPEG data.\r\n image_data_tensor: Input data layer in the graph.\r\n bottleneck_tensor: Layer before the final softmax.\r\n\r\n Returns:\r\n Numpy array of bottleneck values.\r\n \"\"\"\r\n bottleneck_values = sess.run(\r\n bottleneck_tensor,\r\n {image_data_tensor: image_data})\r\n bottleneck_values = np.squeeze(bottleneck_values)\r\n return bottleneck_values\r\n\r\n\r\ndef maybe_download_and_extract():\r\n \"\"\"Download and extract model tar file.\r\n\r\n If the pretrained model we're using doesn't already exist, this function\r\n downloads it from the TensorFlow.org website and unpacks it into a directory.\r\n \"\"\"\r\n dest_directory = FLAGS.model_dir\r\n if not os.path.exists(dest_directory):\r\n os.makedirs(dest_directory)\r\n filename = DATA_URL.split('/')[-1]\r\n filepath = os.path.join(dest_directory, filename)\r\n if not os.path.exists(filepath):\r\n\r\n def _progress(count, block_size, total_size):\r\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\r\n (filename,\r\n float(count * block_size) / float(total_size) * 100.0))\r\n sys.stdout.flush()\r\n\r\n filepath, _ = urllib.request.urlretrieve(DATA_URL,\r\n filepath,\r\n _progress)\r\n print()\r\n statinfo = os.stat(filepath)\r\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\r\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\r\n\r\n\r\ndef ensure_dir_exists(dir_name):\r\n \"\"\"Makes sure the folder exists on disk.\r\n\r\n Args:\r\n dir_name: Path string to the folder we want to create.\r\n \"\"\"\r\n if not os.path.exists(dir_name):\r\n os.makedirs(dir_name)\r\n\r\n\r\ndef write_list_of_floats_to_file(list_of_floats , file_path):\r\n \"\"\"Writes a given list of floats to a binary file.\r\n\r\n Args:\r\n list_of_floats: List of floats we want to write to a file.\r\n file_path: Path to a file where list of floats will be stored.\r\n\r\n \"\"\"\r\n\r\n s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)\r\n with open(file_path, 'wb') as f:\r\n f.write(s)\r\n\r\n\r\ndef 
read_list_of_floats_from_file(file_path):\r\n \"\"\"Reads list of floats from a given file.\r\n\r\n Args:\r\n file_path: Path to a file where list of floats was stored.\r\n Returns:\r\n Array of bottleneck values (list of floats).\r\n\r\n \"\"\"\r\n\r\n with open(file_path, 'rb') as f:\r\n s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())\r\n return list(s)\r\n\r\n\r\nbottleneck_path_2_bottleneck_values = {}\r\n\r\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\r\n image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor):\r\n print('Creating bottleneck at ' + bottleneck_path)\r\n image_path = get_image_path(image_lists, label_name, index, image_dir, category)\r\n if not gfile.Exists(image_path):\r\n tf.logging.fatal('File does not exist %s', image_path)\r\n image_data = gfile.FastGFile(image_path, 'rb').read()\r\n bottleneck_values = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)\r\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\r\n with open(bottleneck_path, 'w') as bottleneck_file:\r\n bottleneck_file.write(bottleneck_string)\r\n\r\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\r\n category, bottleneck_dir, jpeg_data_tensor,\r\n bottleneck_tensor):\r\n \"\"\"Retrieves or calculates bottleneck values for an image.\r\n\r\n If a cached version of the bottleneck data exists on-disk, return that,\r\n otherwise calculate the data and save it to disk for future use.\r\n\r\n Args:\r\n sess: The current active TensorFlow Session.\r\n image_lists: Dictionary of training images for each label.\r\n label_name: Label string we want to get an image for.\r\n index: Integer offset of the image we want. This will be modulo-ed by the\r\n available number of images for the label, so it can be arbitrarily large.\r\n image_dir: Root folder string of the subfolders containing the training\r\n images.\r\n category: Name string of which set to pull images from - training, testing,\r\n or validation.\r\n bottleneck_dir: Folder string holding cached files of bottleneck values.\r\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\r\n bottleneck_tensor: The output tensor for the bottleneck values.\r\n\r\n Returns:\r\n Numpy array of values produced by the bottleneck layer for the image.\r\n \"\"\"\r\n label_lists = image_lists[label_name]\r\n sub_dir = label_lists['dir']\r\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\r\n ensure_dir_exists(sub_dir_path)\r\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index, bottleneck_dir, category)\r\n if not os.path.exists(bottleneck_path):\r\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)\r\n with open(bottleneck_path, 'r') as bottleneck_file:\r\n bottleneck_string = bottleneck_file.read()\r\n did_hit_error = False\r\n try:\r\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\r\n except:\r\n print(\"Invalid float found, recreating bottleneck\")\r\n did_hit_error = True\r\n if did_hit_error:\r\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category, sess, jpeg_data_tensor, bottleneck_tensor)\r\n with open(bottleneck_path, 'r') as bottleneck_file:\r\n bottleneck_string = bottleneck_file.read()\r\n # Allow exceptions to propagate here, since they shouldn't happen after a fresh creation\r\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\r\n return 
bottleneck_values\r\n\r\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\r\n jpeg_data_tensor, bottleneck_tensor):\r\n \"\"\"Ensures all the training, testing, and validation bottlenecks are cached.\r\n\r\n Because we're likely to read the same image multiple times (if there are no\r\n distortions applied during training) it can speed things up a lot if we\r\n calculate the bottleneck layer values once for each image during\r\n preprocessing, and then just read those cached values repeatedly during\r\n training. Here we go through all the images we've found, calculate those\r\n values, and save them off.\r\n\r\n Args:\r\n sess: The current active TensorFlow Session.\r\n image_lists: Dictionary of training images for each label.\r\n image_dir: Root folder string of the subfolders containing the training\r\n images.\r\n bottleneck_dir: Folder string holding cached files of bottleneck values.\r\n jpeg_data_tensor: Input tensor for jpeg data from file.\r\n bottleneck_tensor: The penultimate output layer of the graph.\r\n\r\n Returns:\r\n Nothing.\r\n \"\"\"\r\n how_many_bottlenecks = 0\r\n ensure_dir_exists(bottleneck_dir)\r\n for label_name, label_lists in image_lists.items():\r\n for category in ['training', 'testing', 'validation']:\r\n category_list = label_lists[category]\r\n for index, unused_base_name in enumerate(category_list):\r\n get_or_create_bottleneck(sess, image_lists, label_name, index,\r\n image_dir, category, bottleneck_dir,\r\n jpeg_data_tensor, bottleneck_tensor)\r\n\r\n how_many_bottlenecks += 1\r\n if how_many_bottlenecks % 100 == 0:\r\n print(str(how_many_bottlenecks) + ' bottleneck files created.')\r\n\r\n\r\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\r\n bottleneck_dir, image_dir, jpeg_data_tensor,\r\n bottleneck_tensor):\r\n \"\"\"Retrieves bottleneck values for cached images.\r\n\r\n If no distortions are being applied, this function can retrieve the cached\r\n bottleneck values directly from disk for images. 
It picks a random set of\r\n images from the specified category.\r\n\r\n Args:\r\n sess: Current TensorFlow Session.\r\n image_lists: Dictionary of training images for each label.\r\n how_many: If positive, a random sample of this size will be chosen.\r\n If negative, all bottlenecks will be retrieved.\r\n category: Name string of which set to pull from - training, testing, or\r\n validation.\r\n bottleneck_dir: Folder string holding cached files of bottleneck values.\r\n image_dir: Root folder string of the subfolders containing the training\r\n images.\r\n jpeg_data_tensor: The layer to feed jpeg image data into.\r\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\r\n\r\n Returns:\r\n List of bottleneck arrays, their corresponding ground truths, and the\r\n relevant filenames.\r\n \"\"\"\r\n class_count = len(image_lists.keys())\r\n bottlenecks = []\r\n ground_truths = []\r\n filenames = []\r\n if how_many >= 0:\r\n # Retrieve a random sample of bottlenecks.\r\n for unused_i in range(how_many):\r\n label_index = random.randrange(class_count)\r\n label_name = list(image_lists.keys())[label_index]\r\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\r\n image_name = get_image_path(image_lists, label_name, image_index,\r\n image_dir, category)\r\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\r\n image_index, image_dir, category,\r\n bottleneck_dir, jpeg_data_tensor,\r\n bottleneck_tensor)\r\n ground_truth = np.zeros(class_count, dtype=np.float32)\r\n ground_truth[label_index] = 1.0\r\n bottlenecks.append(bottleneck)\r\n ground_truths.append(ground_truth)\r\n filenames.append(image_name)\r\n else:\r\n # Retrieve all bottlenecks.\r\n for label_index, label_name in enumerate(image_lists.keys()):\r\n for image_index, image_name in enumerate(\r\n image_lists[label_name][category]):\r\n image_name = get_image_path(image_lists, label_name, image_index,\r\n image_dir, category)\r\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\r\n image_index, image_dir, category,\r\n bottleneck_dir, jpeg_data_tensor,\r\n bottleneck_tensor)\r\n ground_truth = np.zeros(class_count, dtype=np.float32)\r\n ground_truth[label_index] = 1.0\r\n bottlenecks.append(bottleneck)\r\n ground_truths.append(ground_truth)\r\n filenames.append(image_name)\r\n return bottlenecks, ground_truths, filenames\r\n\r\n\r\ndef get_random_distorted_bottlenecks(\r\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\r\n distorted_image, resized_input_tensor, bottleneck_tensor):\r\n \"\"\"Retrieves bottleneck values for training images, after distortions.\r\n\r\n If we're training with distortions like crops, scales, or flips, we have to\r\n recalculate the full model for every image, and so we can't use cached\r\n bottleneck values. 
Instead we find random images for the requested category,\r\n run them through the distortion graph, and then the full graph to get the\r\n bottleneck results for each.\r\n\r\n Args:\r\n sess: Current TensorFlow Session.\r\n image_lists: Dictionary of training images for each label.\r\n how_many: The integer number of bottleneck values to return.\r\n category: Name string of which set of images to fetch - training, testing,\r\n or validation.\r\n image_dir: Root folder string of the subfolders containing the training\r\n images.\r\n input_jpeg_tensor: The input layer we feed the image data to.\r\n distorted_image: The output node of the distortion graph.\r\n resized_input_tensor: The input node of the recognition graph.\r\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\r\n\r\n Returns:\r\n List of bottleneck arrays and their corresponding ground truths.\r\n \"\"\"\r\n class_count = len(image_lists.keys())\r\n bottlenecks = []\r\n ground_truths = []\r\n for unused_i in range(how_many):\r\n label_index = random.randrange(class_count)\r\n label_name = list(image_lists.keys())[label_index]\r\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\r\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\r\n category)\r\n if not gfile.Exists(image_path):\r\n tf.logging.fatal('File does not exist %s', image_path)\r\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\r\n # Note that we materialize the distorted_image_data as a numpy array before\r\n # sending running inference on the image. This involves 2 memory copies and\r\n # might be optimized in other implementations.\r\n distorted_image_data = sess.run(distorted_image,\r\n {input_jpeg_tensor: jpeg_data})\r\n bottleneck = run_bottleneck_on_image(sess, distorted_image_data,\r\n resized_input_tensor,\r\n bottleneck_tensor)\r\n ground_truth = np.zeros(class_count, dtype=np.float32)\r\n ground_truth[label_index] = 1.0\r\n bottlenecks.append(bottleneck)\r\n ground_truths.append(ground_truth)\r\n return bottlenecks, ground_truths\r\n\r\n\r\ndef should_distort_images(flip_left_right, random_crop, random_scale,\r\n random_brightness):\r\n \"\"\"Whether any distortions are enabled, from the input flags.\r\n\r\n Args:\r\n flip_left_right: Boolean whether to randomly mirror images horizontally.\r\n random_crop: Integer percentage setting the total margin used around the\r\n crop box.\r\n random_scale: Integer percentage of how much to vary the scale by.\r\n random_brightness: Integer range to randomly multiply the pixel values by.\r\n\r\n Returns:\r\n Boolean value indicating whether any distortions should be applied.\r\n \"\"\"\r\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\r\n (random_brightness != 0))\r\n\r\n\r\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\r\n random_brightness):\r\n \"\"\"Creates the operations to apply the specified distortions.\r\n\r\n During training it can help to improve the results if we run the images\r\n through simple distortions like crops, scales, and flips. These reflect the\r\n kind of variations we expect in the real world, and so can help train the\r\n model to cope with natural data more effectively. Here we take the supplied\r\n parameters and construct a network of operations to apply them to an image.\r\n\r\n Cropping\r\n ~~~~~~~~\r\n\r\n Cropping is done by placing a bounding box at a random position in the full\r\n image. The cropping parameter controls the size of that box relative to the\r\n input image. 
If it's zero, then the box is the same size as the input and no\r\n cropping is performed. If the value is 50%, then the crop box will be half the\r\n width and height of the input. In a diagram it looks like this:\r\n\r\n < width >\r\n +---------------------+\r\n | |\r\n | width - crop% |\r\n | < > |\r\n | +------+ |\r\n | | | |\r\n | | | |\r\n | | | |\r\n | +------+ |\r\n | |\r\n | |\r\n +---------------------+\r\n\r\n Scaling\r\n ~~~~~~~\r\n\r\n Scaling is a lot like cropping, except that the bounding box is always\r\n centered and its size varies randomly within the given range. For example if\r\n the scale percentage is zero, then the bounding box is the same size as the\r\n input and no scaling is applied. If it's 50%, then the bounding box will be in\r\n a random range between half the width and height and full size.\r\n\r\n Args:\r\n flip_left_right: Boolean whether to randomly mirror images horizontally.\r\n random_crop: Integer percentage setting the total margin used around the\r\n crop box.\r\n random_scale: Integer percentage of how much to vary the scale by.\r\n random_brightness: Integer range to randomly multiply the pixel values by.\r\n graph.\r\n\r\n Returns:\r\n The jpeg input layer and the distorted result tensor.\r\n \"\"\"\r\n\r\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\r\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)\r\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\r\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\r\n margin_scale = 1.0 + (random_crop / 100.0)\r\n resize_scale = 1.0 + (random_scale / 100.0)\r\n margin_scale_value = tf.constant(margin_scale)\r\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\r\n minval=1.0,\r\n maxval=resize_scale)\r\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\r\n precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)\r\n precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)\r\n precrop_shape = tf.stack([precrop_height, precrop_width])\r\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\r\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\r\n precrop_shape_as_int)\r\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\r\n cropped_image = tf.random_crop(precropped_image_3d,\r\n [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,\r\n MODEL_INPUT_DEPTH])\r\n if flip_left_right:\r\n flipped_image = tf.image.random_flip_left_right(cropped_image)\r\n else:\r\n flipped_image = cropped_image\r\n brightness_min = 1.0 - (random_brightness / 100.0)\r\n brightness_max = 1.0 + (random_brightness / 100.0)\r\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\r\n minval=brightness_min,\r\n maxval=brightness_max)\r\n brightened_image = tf.multiply(flipped_image, brightness_value)\r\n distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')\r\n return jpeg_data, distort_result\r\n\r\n\r\ndef variable_summaries(var):\r\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\r\n with tf.name_scope('summaries'):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', mean)\r\n with tf.name_scope('stddev'):\r\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n tf.summary.scalar('stddev', stddev)\r\n tf.summary.scalar('max', tf.reduce_max(var))\r\n tf.summary.scalar('min', tf.reduce_min(var))\r\n tf.summary.histogram('histogram', var)\r\n\r\n\r\ndef add_final_training_ops(class_count, final_tensor_name, 
bottleneck_tensor):\r\n \"\"\"Adds a new softmax and fully-connected layer for training.\r\n\r\n We need to retrain the top layer to identify our new classes, so this function\r\n adds the right operations to the graph, along with some variables to hold the\r\n weights, and then sets up all the gradients for the backward pass.\r\n\r\n The set up for the softmax and fully-connected layers is based on:\r\n https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\r\n\r\n Args:\r\n class_count: Integer of how many categories of things we're trying to\r\n recognize.\r\n final_tensor_name: Name string for the new final node that produces results.\r\n bottleneck_tensor: The output of the main CNN graph.\r\n\r\n Returns:\r\n The tensors for the training and cross entropy results, and tensors for the\r\n bottleneck input and ground truth input.\r\n \"\"\"\r\n with tf.name_scope('input'):\r\n bottleneck_input = tf.placeholder_with_default(\r\n bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],\r\n name='BottleneckInputPlaceholder')\r\n\r\n ground_truth_input = tf.placeholder(tf.float32,\r\n [None, class_count],\r\n name='GroundTruthInput')\r\n\r\n # Organizing the following ops as `final_training_ops` so they're easier\r\n # to see in TensorBoard\r\n layer_name = 'final_training_ops'\r\n with tf.name_scope(layer_name):\r\n with tf.name_scope('weights'):\r\n layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')\r\n variable_summaries(layer_weights)\r\n with tf.name_scope('biases'):\r\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\r\n variable_summaries(layer_biases)\r\n with tf.name_scope('Wx_plus_b'):\r\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\r\n tf.summary.histogram('pre_activations', logits)\r\n\r\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\r\n tf.summary.histogram('activations', final_tensor)\r\n\r\n with tf.name_scope('cross_entropy'):\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\r\n labels=ground_truth_input, logits=logits)\r\n with tf.name_scope('total'):\r\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\r\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\r\n\r\n with tf.name_scope('train'):\r\n train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(\r\n cross_entropy_mean)\r\n\r\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\r\n final_tensor)\r\n\r\n\r\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\r\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\r\n\r\n Args:\r\n result_tensor: The new final node that produces results.\r\n ground_truth_tensor: The node we feed ground truth data\r\n into.\r\n\r\n Returns:\r\n Tuple of (evaluation step, prediction).\r\n \"\"\"\r\n with tf.name_scope('accuracy'):\r\n with tf.name_scope('correct_prediction'):\r\n prediction = tf.argmax(result_tensor, 1)\r\n correct_prediction = tf.equal(\r\n prediction, tf.argmax(ground_truth_tensor, 1))\r\n with tf.name_scope('accuracy'):\r\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n tf.summary.scalar('accuracy', evaluation_step)\r\n return evaluation_step, prediction\r\n\r\n#\r\n#def export_model(sess, saved_model_dir):\r\n# architecture = 'inception_v3'\r\n# if architecture == 'inception_v3':\r\n# input_tensor = 'DecodeJpeg/contents:0'\r\n# elif architecture.startswith('mobilenet_'):\r\n# 
input_tensor = 'input:0'\r\n# else:\r\n# raise ValueError('Unknown architecture', architecture)\r\n# in_image = sess.graph.get_tensor_by_name(input_tensor)\r\n# inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}\r\n# \r\n# out_classes = sess.graph.get_tensor_by_name('final_result:0')\r\n# outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}\r\n#\r\n# signature = tf.saved_model.signature_def_utils.build_signature_def(\r\n# inputs=inputs,\r\n# outputs=outputs,\r\n# method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\r\n# )\r\n#\r\n# legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\r\n# print(tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)\r\n# print(tf.saved_model.signature_constants.PREDICT_METHOD_NAME)\r\n# \r\n#\r\n# try: \r\n# # Save out the SavedModel.\r\n# builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)\r\n# builder.add_meta_graph_and_variables(\r\n# sess, [tf.saved_model.tag_constants.SERVING],\r\n# signature_def_map={\r\n# tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature\r\n# },\r\n# legacy_init_op=legacy_init_op)\r\n# builder.save()\r\n# except IndexError:\r\n# exc_type, exc_value, exc_traceback = sys.exc_info()\r\n# traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)\r\n\r\ndef export_model(sess,keys, architecture, saved_model_dir):\r\n if architecture == 'inception_v3':\r\n input_tensor = 'DecodeJpeg/contents:0'\r\n elif architecture.startswith('mobilenet_'):\r\n input_tensor = 'input:0'\r\n else:\r\n raise ValueError('Unknown architecture', architecture)\r\n in_image = sess.graph.get_tensor_by_name(input_tensor)\r\n inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}\r\n\r\n out_classes = sess.graph.get_tensor_by_name('final_result:0')\r\n #outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}\r\n outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes),\r\n 'classes': tf.saved_model.utils.build_tensor_info(tf.convert_to_tensor(list(keys))),}\r\n\r\n signature = tf.saved_model.signature_def_utils.build_signature_def(\r\n inputs=inputs,\r\n outputs=outputs,\r\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\r\n )\r\n\r\n legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\r\n\r\n # Save out the SavedModel.\r\n builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)\r\n builder.add_meta_graph_and_variables(\r\n sess, [tf.saved_model.tag_constants.SERVING],\r\n signature_def_map={\r\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature\r\n },\r\n legacy_init_op=legacy_init_op)\r\n builder.save()\r\n \r\n \r\n\r\ndef main(_):\r\n # Setup the directory we'll write summaries to for TensorBoard\r\n if tf.gfile.Exists(FLAGS.summaries_dir):\r\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\r\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\r\n\r\n # Set up the pre-trained graph.\r\n maybe_download_and_extract()\r\n graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (\r\n create_inception_graph())\r\n\r\n # Look at the folder structure, and create lists of all the images.\r\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\r\n FLAGS.validation_percentage)\r\n class_count = len(image_lists.keys())\r\n if class_count == 0:\r\n print('No valid folders of images found at ' + FLAGS.image_dir)\r\n return -1\r\n if class_count == 
1:\r\n print('Only one valid folder of images found at ' + FLAGS.image_dir +\r\n ' - multiple classes are needed for classification.')\r\n return -1\r\n\r\n # See if the command-line flags mean we're applying any distortions.\r\n do_distort_images = should_distort_images(\r\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\r\n FLAGS.random_brightness)\r\n sess = tf.Session()\r\n\r\n if do_distort_images:\r\n # We will be applying distortions, so setup the operations we'll need.\r\n distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(\r\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\r\n FLAGS.random_brightness)\r\n else:\r\n # We'll make sure we've calculated the 'bottleneck' image summaries and\r\n # cached them on disk.\r\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,\r\n jpeg_data_tensor, bottleneck_tensor)\r\n\r\n # Add the new layer that we'll be training.\r\n (train_step, cross_entropy, bottleneck_input, ground_truth_input,\r\n final_tensor) = add_final_training_ops(len(image_lists.keys()),\r\n FLAGS.final_tensor_name,\r\n bottleneck_tensor)\r\n\r\n # Create the operations we need to evaluate the accuracy of our new layer.\r\n evaluation_step, prediction = add_evaluation_step(\r\n final_tensor, ground_truth_input)\r\n\r\n # Merge all the summaries and write them out to /tmp/retrain_logs (by default)\r\n merged = tf.summary.merge_all()\r\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\r\n sess.graph)\r\n validation_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/validation')\r\n\r\n # Set up all our weights to their initial default values.\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n\r\n # Run the training for as many cycles as requested on the command line.\r\n for i in range(FLAGS.how_many_training_steps):\r\n # Get a batch of input bottleneck values, either calculated fresh every time\r\n # with distortions applied, or from the cache stored on disk.\r\n if do_distort_images:\r\n train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(\r\n sess, image_lists, FLAGS.train_batch_size, 'training',\r\n FLAGS.image_dir, distorted_jpeg_data_tensor,\r\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\r\n else:\r\n train_bottlenecks, train_ground_truth, _ = get_random_cached_bottlenecks(\r\n sess, image_lists, FLAGS.train_batch_size, 'training',\r\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\r\n bottleneck_tensor)\r\n # Feed the bottlenecks and ground truth into the graph, and run a training\r\n # step. 
Capture training summaries for TensorBoard with the `merged` op.\r\n train_summary, _ = sess.run([merged, train_step],\r\n feed_dict={bottleneck_input: train_bottlenecks,\r\n ground_truth_input: train_ground_truth})\r\n train_writer.add_summary(train_summary, i)\r\n\r\n # Every so often, print out how well the graph is training.\r\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\r\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\r\n train_accuracy, cross_entropy_value = sess.run(\r\n [evaluation_step, cross_entropy],\r\n feed_dict={bottleneck_input: train_bottlenecks,\r\n ground_truth_input: train_ground_truth})\r\n print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,\r\n train_accuracy * 100))\r\n print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,\r\n cross_entropy_value))\r\n validation_bottlenecks, validation_ground_truth, _ = (\r\n get_random_cached_bottlenecks(\r\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\r\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\r\n bottleneck_tensor))\r\n # Run a validation step and capture training summaries for TensorBoard\r\n # with the `merged` op.\r\n validation_summary, validation_accuracy = sess.run(\r\n [merged, evaluation_step],\r\n feed_dict={bottleneck_input: validation_bottlenecks,\r\n ground_truth_input: validation_ground_truth})\r\n validation_writer.add_summary(validation_summary, i)\r\n print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %\r\n (datetime.now(), i, validation_accuracy * 100,\r\n len(validation_bottlenecks)))\r\n\r\n # We've completed all our training, so run a final test evaluation on\r\n # some new images we haven't used before.\r\n test_bottlenecks, test_ground_truth, test_filenames = (\r\n get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,\r\n 'testing', FLAGS.bottleneck_dir,\r\n FLAGS.image_dir, jpeg_data_tensor,\r\n bottleneck_tensor))\r\n test_accuracy, predictions = sess.run(\r\n [evaluation_step, prediction],\r\n feed_dict={bottleneck_input: test_bottlenecks,\r\n ground_truth_input: test_ground_truth})\r\n print('Final test accuracy = %.1f%% (N=%d)' % (\r\n test_accuracy * 100, len(test_bottlenecks)))\r\n\r\n if FLAGS.print_misclassified_test_images:\r\n print('=== MISCLASSIFIED TEST IMAGES ===')\r\n for i, test_filename in enumerate(test_filenames):\r\n if predictions[i] != test_ground_truth[i].argmax():\r\n print('%70s %s' % (test_filename,\r\n list(image_lists.keys())[predictions[i]]))\r\n\r\n # Write out the trained graph and labels with the weights stored as constants.\r\n output_graph_def = graph_util.convert_variables_to_constants(\r\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\r\n with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:\r\n f.write(output_graph_def.SerializeToString())\r\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\r\n f.write('\\n'.join(image_lists.keys()) + '\\n')\r\n\t\r\n export_model(sess,image_lists.keys(), FLAGS.architecture, FLAGS.saved_model_dir)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n \r\n parser.add_argument(\r\n '--image_dir',\r\n type=str,\r\n default='',\r\n help='Path to folders of labeled images.'\r\n )\r\n parser.add_argument(\r\n '--output_graph',\r\n type=str,\r\n default='/tmp/output_graph.pb',\r\n help='Where to save the trained graph.'\r\n )\r\n parser.add_argument(\r\n '--output_labels',\r\n type=str,\r\n default='/tmp/output_labels.txt',\r\n help='Where to save the trained graph\\'s labels.'\r\n )\r\n 
parser.add_argument(\r\n '--summaries_dir',\r\n type=str,\r\n default='/tmp/retrain_logs',\r\n help='Where to save summary logs for TensorBoard.'\r\n )\r\n parser.add_argument(\r\n '--how_many_training_steps',\r\n type=int,\r\n default=4000,\r\n help='How many training steps to run before ending.'\r\n )\r\n parser.add_argument(\r\n '--learning_rate',\r\n type=float,\r\n default=0.01,\r\n help='How large a learning rate to use when training.'\r\n )\r\n parser.add_argument(\r\n '--testing_percentage',\r\n type=int,\r\n default=10,\r\n help='What percentage of images to use as a test set.'\r\n )\r\n parser.add_argument(\r\n '--validation_percentage',\r\n type=int,\r\n default=10,\r\n help='What percentage of images to use as a validation set.'\r\n )\r\n parser.add_argument(\r\n '--eval_step_interval',\r\n type=int,\r\n default=10,\r\n help='How often to evaluate the training results.'\r\n )\r\n parser.add_argument(\r\n '--train_batch_size',\r\n type=int,\r\n default=100,\r\n help='How many images to train on at a time.'\r\n )\r\n parser.add_argument(\r\n '--test_batch_size',\r\n type=int,\r\n default=-1,\r\n help=\"\"\"\\\r\n How many images to test on. This test set is only used once, to evaluate\r\n the final accuracy of the model after training completes.\r\n A value of -1 causes the entire test set to be used, which leads to more\r\n stable results across runs.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--validation_batch_size',\r\n type=int,\r\n default=100,\r\n help=\"\"\"\\\r\n How many images to use in an evaluation batch. This validation set is\r\n used much more often than the test set, and is an early indicator of how\r\n accurate the model is during training.\r\n A value of -1 causes the entire validation set to be used, which leads to\r\n more stable results across training iterations, but may be slower on large\r\n training sets.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--print_misclassified_test_images',\r\n default=False,\r\n help=\"\"\"\\\r\n Whether to print out a list of all misclassified test images.\\\r\n \"\"\",\r\n action='store_true'\r\n )\r\n parser.add_argument(\r\n '--model_dir',\r\n type=str,\r\n default='/tmp/imagenet',\r\n help=\"\"\"\\\r\n Path to classify_image_graph_def.pb,\r\n imagenet_synset_to_human_label_map.txt, and\r\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--bottleneck_dir',\r\n type=str,\r\n default='/tmp/bottleneck',\r\n help='Path to cache bottleneck layer values as files.'\r\n )\r\n parser.add_argument(\r\n '--final_tensor_name',\r\n type=str,\r\n default='final_result',\r\n help=\"\"\"\\\r\n The name of the output classification layer in the retrained graph.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--flip_left_right',\r\n default=False,\r\n help=\"\"\"\\\r\n Whether to randomly flip half of the training images horizontally.\\\r\n \"\"\",\r\n action='store_true'\r\n )\r\n parser.add_argument(\r\n '--random_crop',\r\n type=int,\r\n default=0,\r\n help=\"\"\"\\\r\n A percentage determining how much of a margin to randomly crop off the\r\n training images.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--random_scale',\r\n type=int,\r\n default=0,\r\n help=\"\"\"\\\r\n A percentage determining how much to randomly scale up the size of the\r\n training images by.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--saved_model_dir',\r\n type=str,\r\n default='/tmp/saved_models/1/',\r\n help='Where to save the exported graph.'\r\n )\r\n parser.add_argument(\r\n 
'--random_brightness',\r\n type=int,\r\n default=0,\r\n help=\"\"\"\\\r\n A percentage determining how much to randomly multiply the training image\r\n input pixels up or down by.\\\r\n \"\"\"\r\n )\r\n parser.add_argument(\r\n '--architecture',\r\n type=str,\r\n default='inception_v3',\r\n help=\"\"\"\\\r\n Which model architecture to use. 'inception_v3' is the most accurate, but\r\n also the slowest. For faster or smaller models, choose a MobileNet with the\r\n form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,\r\n 'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224\r\n pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much\r\n less accurate, but smaller and faster network that's 920 KB on disk and\r\n takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html\r\n for more information on Mobilenet.\\\r\n \"\"\")\r\n \r\n FLAGS, unparsed = parser.parse_known_args()\r\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)" ]
[ [ "tensorflow.python.framework.tensor_shape.scalar", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.python.platform.gfile.Walk", "tensorflow.gfile.DeleteRecursively", "tensorflow.zeros", "tensorflow.gfile.Exists", "tensorflow.stack", "numpy.squeeze", "tensorflow.cast", "tensorflow.python.platform.gfile.Exists", "tensorflow.gfile.MakeDirs", "tensorflow.summary.scalar", "tensorflow.import_graph_def", "tensorflow.image.random_flip_left_right", "tensorflow.placeholder_with_default", "tensorflow.squeeze", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "tensorflow.argmax", "numpy.zeros", "tensorflow.logging.fatal", "tensorflow.image.decode_jpeg", "tensorflow.app.run", "tensorflow.image.resize_bilinear", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.saved_model.builder.SavedModelBuilder", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.merge_all", "tensorflow.python.platform.gfile.FastGFile", "tensorflow.saved_model.utils.build_tensor_info", "tensorflow.summary.histogram", "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.constant", "tensorflow.nn.softmax", "tensorflow.saved_model.signature_def_utils.build_signature_def", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.python.platform.gfile.Glob", "tensorflow.expand_dims", "tensorflow.random_crop", "tensorflow.python.util.compat.as_bytes", "tensorflow.reduce_min", "tensorflow.GraphDef", "tensorflow.tables_initializer" ] ]
Pandinosaurus/mne-python
[ "6ae3b22033c745cce5cd5de9b92da54c13c36484" ]
[ "mne/source_estimate.py" ]
[ "# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hämäläinen <[email protected]>\n# Martin Luessi <[email protected]>\n# Mads Jensen <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport contextlib\nimport copy\nimport os.path as op\nfrom types import GeneratorType\n\nimport numpy as np\nfrom scipy import linalg, sparse\nfrom scipy.sparse import coo_matrix, block_diag as sparse_block_diag\n\nfrom .baseline import rescale\nfrom .cov import Covariance\nfrom .evoked import _get_peak\nfrom .filter import resample\nfrom .io.constants import FIFF\nfrom .surface import (read_surface, _get_ico_surface, mesh_edges,\n _project_onto_surface)\nfrom .source_space import (_ensure_src, _get_morph_src_reordering,\n _ensure_src_subject, SourceSpaces, _get_src_nn,\n _import_nibabel, _get_mri_info_data,\n _get_atlas_values, _check_volume_labels,\n read_freesurfer_lut)\nfrom .transforms import _get_trans, apply_trans\nfrom .utils import (get_subjects_dir, _check_subject, logger, verbose, _pl,\n _time_mask, warn, copy_function_doc_to_method_doc,\n fill_doc, _check_option, _validate_type, _check_src_normal,\n _check_stc_units, _check_pandas_installed,\n _check_pandas_index_arguments, _convert_times, _ensure_int,\n _build_data_frame, _check_time_format, _check_path_like,\n sizeof_fmt, object_size)\nfrom .viz import (plot_source_estimates, plot_vector_source_estimates,\n plot_volume_source_estimates)\nfrom .io.base import TimeMixin\nfrom .io.meas_info import Info\nfrom .externals.h5io import read_hdf5, write_hdf5\n\n\ndef _read_stc(filename):\n \"\"\"Aux Function.\"\"\"\n with open(filename, 'rb') as fid:\n buf = fid.read()\n\n stc = dict()\n offset = 0\n num_bytes = 4\n\n # read tmin in ms\n stc['tmin'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tmin'] /= 1000.0\n offset += num_bytes\n\n # read sampling rate in ms\n stc['tstep'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tstep'] /= 1000.0\n offset += num_bytes\n\n # read number of vertices/sources\n vertices_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n # read the source vector\n stc['vertices'] = np.frombuffer(buf, dtype=\">u4\", count=vertices_n,\n offset=offset)\n offset += num_bytes * vertices_n\n\n # read the number of timepts\n data_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n if (vertices_n and # vertices_n can be 0 (empty stc)\n ((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):\n raise ValueError('incorrect stc file size')\n\n # read the data matrix\n stc['data'] = np.frombuffer(buf, dtype=\">f4\", count=vertices_n * data_n,\n offset=offset)\n stc['data'] = stc['data'].reshape([data_n, vertices_n]).T\n\n return stc\n\n\ndef _write_stc(filename, tmin, tstep, vertices, data):\n \"\"\"Write an STC file.\n\n Parameters\n ----------\n filename : string\n The name of the STC file.\n tmin : float\n The first time point of the data in seconds.\n tstep : float\n Time between frames in seconds.\n vertices : array of integers\n Vertex indices (0 based).\n data : 2D array\n The data matrix (nvert * ntime).\n \"\"\"\n fid = open(filename, 'wb')\n\n # write start time in ms\n fid.write(np.array(1000 * tmin, dtype='>f4').tobytes())\n # write sampling rate in ms\n fid.write(np.array(1000 * tstep, dtype='>f4').tobytes())\n # write number of vertices\n fid.write(np.array(vertices.shape[0], dtype='>u4').tobytes())\n # write the vertex indices\n fid.write(np.array(vertices, 
dtype='>u4').tobytes())\n\n # write the number of timepts\n fid.write(np.array(data.shape[1], dtype='>u4').tobytes())\n #\n # write the data\n #\n fid.write(np.array(data.T, dtype='>f4').tobytes())\n\n # close the file\n fid.close()\n\n\ndef _read_3(fid):\n \"\"\"Read 3 byte integer from file.\"\"\"\n data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)\n\n out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]\n\n return out\n\n\ndef _read_w(filename):\n \"\"\"Read a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename : string\n The name of the w file.\n\n Returns\n -------\n data: dict\n The w structure. It has the following keys:\n vertices vertex indices (0 based)\n data The data matrix (nvert long)\n \"\"\"\n with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug\n # skip first 2 bytes\n fid.read(2)\n\n # read number of vertices/sources (3 byte integer)\n vertices_n = int(_read_3(fid))\n\n vertices = np.zeros((vertices_n), dtype=np.int32)\n data = np.zeros((vertices_n), dtype=np.float32)\n\n # read the vertices and data\n for i in range(vertices_n):\n vertices[i] = _read_3(fid)\n data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]\n\n w = dict()\n w['vertices'] = vertices\n w['data'] = data\n\n return w\n\n\ndef _write_3(fid, val):\n \"\"\"Write 3 byte integer to file.\"\"\"\n f_bytes = np.zeros((3), dtype=np.uint8)\n f_bytes[0] = (val >> 16) & 255\n f_bytes[1] = (val >> 8) & 255\n f_bytes[2] = val & 255\n fid.write(f_bytes.tobytes())\n\n\ndef _write_w(filename, vertices, data):\n \"\"\"Write a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename: string\n The name of the w file.\n vertices: array of int\n Vertex indices (0 based).\n data: 1D array\n The data array (nvert).\n \"\"\"\n assert (len(vertices) == len(data))\n\n fid = open(filename, 'wb')\n\n # write 2 zero bytes\n fid.write(np.zeros((2), dtype=np.uint8).tobytes())\n\n # write number of vertices/sources (3 byte integer)\n vertices_n = len(vertices)\n _write_3(fid, vertices_n)\n\n # write the vertices and data\n for i in range(vertices_n):\n _write_3(fid, vertices[i])\n # XXX: without float() endianness is wrong, not sure why\n fid.write(np.array(float(data[i]), dtype='>f4').tobytes())\n\n # close the file\n fid.close()\n\n\ndef read_source_estimate(fname, subject=None):\n \"\"\"Read a source estimate object.\n\n Parameters\n ----------\n fname : str\n Path to (a) source-estimate file(s).\n subject : str | None\n Name of the subject the source estimate(s) is (are) from.\n It is good practice to set this attribute to avoid combining\n incompatible labels and SourceEstimates (e.g., ones from other\n subjects). Note that due to file specification limitations, the\n subject name isn't saved to or loaded from files written to disk.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate\n The source estimate object loaded from file.\n\n Notes\n -----\n - for volume source estimates, ``fname`` should provide the path to a\n single file named '*-vl.stc` or '*-vol.stc'\n - for surface source estimates, ``fname`` should either provide the\n path to the file corresponding to a single hemisphere ('*-lh.stc',\n '*-rh.stc') or only specify the asterisk part in these patterns. 
In any\n case, the function expects files for both hemisphere with names\n following this pattern.\n - for vector surface source estimates, only HDF5 files are supported.\n - for mixed source estimates, only HDF5 files are supported.\n - for single time point .w files, ``fname`` should follow the same\n pattern as for surface estimates, except that files are named\n '*-lh.w' and '*-rh.w'.\n \"\"\" # noqa: E501\n fname_arg = fname\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n\n # make sure corresponding file(s) can be found\n ftype = None\n if op.exists(fname):\n if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \\\n fname.endswith('-vl.w') or fname.endswith('-vol.w'):\n ftype = 'volume'\n elif fname.endswith('.stc'):\n ftype = 'surface'\n if fname.endswith(('-lh.stc', '-rh.stc')):\n fname = fname[:-7]\n else:\n err = (\"Invalid .stc filename: %r; needs to end with \"\n \"hemisphere tag ('...-lh.stc' or '...-rh.stc')\"\n % fname)\n raise IOError(err)\n elif fname.endswith('.w'):\n ftype = 'w'\n if fname.endswith(('-lh.w', '-rh.w')):\n fname = fname[:-5]\n else:\n err = (\"Invalid .w filename: %r; needs to end with \"\n \"hemisphere tag ('...-lh.w' or '...-rh.w')\"\n % fname)\n raise IOError(err)\n elif fname.endswith('.h5'):\n ftype = 'h5'\n fname = fname[:-3]\n else:\n raise RuntimeError('Unknown extension for file %s' % fname_arg)\n\n if ftype != 'volume':\n stc_exist = [op.exists(f)\n for f in [fname + '-rh.stc', fname + '-lh.stc']]\n w_exist = [op.exists(f)\n for f in [fname + '-rh.w', fname + '-lh.w']]\n if all(stc_exist) and ftype != 'w':\n ftype = 'surface'\n elif all(w_exist):\n ftype = 'w'\n elif op.exists(fname + '.h5'):\n ftype = 'h5'\n elif op.exists(fname + '-stc.h5'):\n ftype = 'h5'\n fname += '-stc'\n elif any(stc_exist) or any(w_exist):\n raise IOError(\"Hemisphere missing for %r\" % fname_arg)\n else:\n raise IOError(\"SourceEstimate File(s) not found for: %r\"\n % fname_arg)\n\n # read the files\n if ftype == 'volume': # volume source space\n if fname.endswith('.stc'):\n kwargs = _read_stc(fname)\n elif fname.endswith('.w'):\n kwargs = _read_w(fname)\n kwargs['data'] = kwargs['data'][:, np.newaxis]\n kwargs['tmin'] = 0.0\n kwargs['tstep'] = 0.0\n else:\n raise IOError('Volume source estimate must end with .stc or .w')\n kwargs['vertices'] = [kwargs['vertices']]\n elif ftype == 'surface': # stc file with surface source spaces\n lh = _read_stc(fname + '-lh.stc')\n rh = _read_stc(fname + '-rh.stc')\n assert lh['tmin'] == rh['tmin']\n assert lh['tstep'] == rh['tstep']\n kwargs = lh.copy()\n kwargs['data'] = np.r_[lh['data'], rh['data']]\n kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n elif ftype == 'w': # w file with surface source spaces\n lh = _read_w(fname + '-lh.w')\n rh = _read_w(fname + '-rh.w')\n kwargs = lh.copy()\n kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T\n kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n # w files only have a single time point\n kwargs['tmin'] = 0.0\n kwargs['tstep'] = 1.0\n ftype = 'surface'\n elif ftype == 'h5':\n kwargs = read_hdf5(fname + '.h5', title='mnepython')\n ftype = kwargs.pop('src_type', 'surface')\n if isinstance(kwargs['vertices'], np.ndarray):\n kwargs['vertices'] = [kwargs['vertices']]\n\n if ftype != 'volume':\n # Make sure the vertices are ordered\n vertices = kwargs['vertices']\n if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n sidx = [np.argsort(verts) for verts in vertices]\n vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]\n 
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]\n kwargs['vertices'] = vertices\n kwargs['data'] = data\n\n if 'subject' not in kwargs:\n kwargs['subject'] = subject\n if subject is not None and subject != kwargs['subject']:\n raise RuntimeError('provided subject name \"%s\" does not match '\n 'subject name from the file \"%s'\n % (subject, kwargs['subject']))\n\n if ftype in ('volume', 'discrete'):\n klass = VolVectorSourceEstimate\n elif ftype == 'mixed':\n klass = MixedVectorSourceEstimate\n else:\n assert ftype == 'surface'\n klass = VectorSourceEstimate\n if kwargs['data'].ndim < 3:\n klass = klass._scalar_class\n return klass(**kwargs)\n\n\ndef _get_src_type(src, vertices, warn_text=None):\n src_type = None\n if src is None:\n if warn_text is None:\n warn(\"src should not be None for a robust guess of stc type.\")\n else:\n warn(warn_text)\n if isinstance(vertices, list) and len(vertices) == 2:\n src_type = 'surface'\n elif isinstance(vertices, np.ndarray) or isinstance(vertices, list) \\\n and len(vertices) == 1:\n src_type = 'volume'\n elif isinstance(vertices, list) and len(vertices) > 2:\n src_type = 'mixed'\n else:\n src_type = src.kind\n assert src_type in ('surface', 'volume', 'mixed', 'discrete')\n return src_type\n\n\ndef _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,\n subject=None, vector=False, source_nn=None, warn_text=None):\n \"\"\"Generate a surface, vector-surface, volume or mixed source estimate.\"\"\"\n def guess_src_type():\n return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)\n\n src_type = guess_src_type() if src_type is None else src_type\n\n if vector and src_type == 'surface' and source_nn is None:\n raise RuntimeError('No source vectors supplied.')\n\n # infer Klass from src_type\n if src_type == 'surface':\n Klass = VectorSourceEstimate if vector else SourceEstimate\n elif src_type in ('volume', 'discrete'):\n Klass = VolVectorSourceEstimate if vector else VolSourceEstimate\n elif src_type == 'mixed':\n Klass = MixedVectorSourceEstimate if vector else MixedSourceEstimate\n else:\n raise ValueError('vertices has to be either a list with one or more '\n 'arrays or an array')\n\n # Rotate back for vector source estimates\n if vector:\n n_vertices = sum(len(v) for v in vertices)\n assert data.shape[0] in (n_vertices, n_vertices * 3)\n if len(data) == n_vertices:\n assert src_type == 'surface' # should only be possible for this\n assert source_nn.shape == (n_vertices, 3)\n data = data[:, np.newaxis] * source_nn[:, :, np.newaxis]\n else:\n data = data.reshape((-1, 3, data.shape[-1]))\n assert source_nn.shape in ((n_vertices, 3, 3),\n (n_vertices * 3, 3))\n # This will be an identity transform for volumes, but let's keep\n # the code simple and general and just do the matrix mult\n data = np.matmul(\n np.transpose(source_nn.reshape(n_vertices, 3, 3),\n axes=[0, 2, 1]), data)\n\n return Klass(\n data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject\n )\n\n\ndef _verify_source_estimate_compat(a, b):\n \"\"\"Make sure two SourceEstimates are compatible for arith. operations.\"\"\"\n compat = False\n if type(a) != type(b):\n raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))\n if len(a.vertices) == len(b.vertices):\n if all(np.array_equal(av, vv)\n for av, vv in zip(a.vertices, b.vertices)):\n compat = True\n if not compat:\n raise ValueError('Cannot combine source estimates that do not have '\n 'the same vertices. 
Consider using stc.expand().')\n if a.subject != b.subject:\n raise ValueError('source estimates do not have the same subject '\n 'names, %r and %r' % (a.subject, b.subject))\n\n\nclass _BaseSourceEstimate(TimeMixin):\n\n _data_ndim = 2\n\n @verbose\n def __init__(self, data, vertices, tmin, tstep,\n subject=None, verbose=None): # noqa: D102\n assert hasattr(self, '_data_ndim'), self.__class__.__name__\n assert hasattr(self, '_src_type'), self.__class__.__name__\n assert hasattr(self, '_src_count'), self.__class__.__name__\n kernel, sens_data = None, None\n if isinstance(data, tuple):\n if len(data) != 2:\n raise ValueError('If data is a tuple it has to be length 2')\n kernel, sens_data = data\n data = None\n if kernel.shape[1] != sens_data.shape[0]:\n raise ValueError('kernel (%s) and sens_data (%s) have invalid '\n 'dimensions'\n % (kernel.shape, sens_data.shape))\n if sens_data.ndim != 2:\n raise ValueError('The sensor data must have 2 dimensions, got '\n '%s' % (sens_data.ndim,))\n\n _validate_type(vertices, list, 'vertices')\n if self._src_count is not None:\n if len(vertices) != self._src_count:\n raise ValueError('vertices must be a list with %d entries, '\n 'got %s' % (self._src_count, len(vertices)))\n vertices = [np.array(v, np.int64) for v in vertices] # makes copy\n if any(np.any(np.diff(v) <= 0) for v in vertices):\n raise ValueError('Vertices must be ordered in increasing order.')\n\n n_src = sum([len(v) for v in vertices])\n\n # safeguard the user against doing something silly\n if data is not None:\n if data.ndim not in (self._data_ndim, self._data_ndim - 1):\n raise ValueError('Data (shape %s) must have %s dimensions for '\n '%s' % (data.shape, self._data_ndim,\n self.__class__.__name__))\n if data.shape[0] != n_src:\n raise ValueError(\n f'Number of vertices ({n_src}) and stc.data.shape[0] '\n f'({data.shape[0]}) must match')\n if self._data_ndim == 3:\n if data.shape[1] != 3:\n raise ValueError(\n 'Data for VectorSourceEstimate must have '\n 'shape[1] == 3, got shape %s' % (data.shape,))\n if data.ndim == self._data_ndim - 1: # allow upbroadcasting\n data = data[..., np.newaxis]\n\n self._data = data\n self._tmin = tmin\n self._tstep = tstep\n self.vertices = vertices\n self.verbose = verbose\n self._kernel = kernel\n self._sens_data = sens_data\n self._kernel_removed = False\n self._times = None\n self._update_times()\n self.subject = _check_subject(None, subject, False)\n\n def __repr__(self): # noqa: D105\n s = \"%d vertices\" % (sum(len(v) for v in self.vertices),)\n if self.subject is not None:\n s += \", subject : %s\" % self.subject\n s += \", tmin : %s (ms)\" % (1e3 * self.tmin)\n s += \", tmax : %s (ms)\" % (1e3 * self.times[-1])\n s += \", tstep : %s (ms)\" % (1e3 * self.tstep)\n s += \", data shape : %s\" % (self.shape,)\n sz = sum(object_size(x) for x in (self.vertices + [self.data]))\n s += f\", ~{sizeof_fmt(sz)}\"\n return \"<%s | %s>\" % (type(self).__name__, s)\n\n @fill_doc\n def get_peak(self, tmin=None, tmax=None, mode='abs',\n vert_as_index=False, time_as_index=False):\n \"\"\"Get location and latency of peak amplitude.\n\n Parameters\n ----------\n %(get_peak_parameters)s\n\n Returns\n -------\n pos : int\n The vertex exhibiting the maximum response, either ID or index.\n latency : float\n The latency in seconds.\n \"\"\"\n stc = self.magnitude() if self._data_ndim == 3 else self\n if self._n_vertices == 0:\n raise RuntimeError('Cannot find peaks with no vertices')\n vert_idx, time_idx, _ = _get_peak(\n stc.data, self.times, tmin, tmax, mode)\n if 
not vert_as_index:\n            vert_idx = np.concatenate(self.vertices)[vert_idx]\n        if not time_as_index:\n            time_idx = self.times[time_idx]\n        return vert_idx, time_idx\n\n    @verbose\n    def extract_label_time_course(self, labels, src, mode='auto',\n                                  allow_empty=False, verbose=None):\n        \"\"\"Extract label time courses for lists of labels.\n\n        This function will extract one time course for each label. The way the\n        time courses are extracted depends on the mode parameter.\n\n        Parameters\n        ----------\n        %(eltc_labels)s\n        %(eltc_src)s\n        %(eltc_mode)s\n        %(eltc_allow_empty)s\n        %(verbose_meth)s\n\n        Returns\n        -------\n        %(eltc_returns)s\n\n        See Also\n        --------\n        extract_label_time_course : Extract time courses for multiple STCs.\n\n        Notes\n        -----\n        %(eltc_mode_notes)s\n        \"\"\"\n        return extract_label_time_course(\n            self, labels, src, mode=mode, return_generator=False,\n            allow_empty=allow_empty, verbose=verbose)\n\n    @verbose\n    def apply_baseline(self, baseline=(None, 0), *, verbose=None):\n        \"\"\"Baseline correct source estimate data.\n\n        Parameters\n        ----------\n        %(baseline_stc)s\n            Defaults to ``(None, 0)``, i.e. beginning of the data until\n            time point zero.\n        %(verbose_meth)s\n\n        Returns\n        -------\n        stc : instance of SourceEstimate\n            The baseline-corrected source estimate object.\n\n        Notes\n        -----\n        Baseline correction can be done multiple times.\n        \"\"\"\n        self.data = rescale(self.data, self.times, baseline, copy=False)\n        return self\n\n    @verbose\n    def save(self, fname, ftype='h5', verbose=None):\n        \"\"\"Save the full source estimate to an HDF5 file.\n\n        Parameters\n        ----------\n        fname : str\n            The file name to write the source estimate to, should end in\n            '-stc.h5'.\n        ftype : str\n            File format to use. Currently, the only allowed value is \"h5\".\n        %(verbose_meth)s\n        \"\"\"\n        _validate_type(fname, 'path-like', 'fname')\n        fname = str(fname)\n        if ftype != 'h5':\n            raise ValueError('%s objects can only be written as HDF5 files.'\n                             % (self.__class__.__name__,))\n        if not fname.endswith('.h5'):\n            fname += '-stc.h5'\n        write_hdf5(fname,\n                   dict(vertices=self.vertices, data=self.data,\n                        tmin=self.tmin, tstep=self.tstep, subject=self.subject,\n                        src_type=self._src_type),\n                   title='mnepython', overwrite=True)\n\n    @copy_function_doc_to_method_doc(plot_source_estimates)\n    def plot(self, subject=None, surface='inflated', hemi='lh',\n             colormap='auto', time_label='auto', smoothing_steps=10,\n             transparent=True, alpha=1.0, time_viewer='auto',\n             subjects_dir=None,\n             figure=None, views='auto', colorbar=True, clim='auto',\n             cortex=\"classic\", size=800, background=\"black\",\n             foreground=None, initial_time=None, time_unit='s',\n             backend='auto', spacing='oct6', title=None, show_traces='auto',\n             src=None, volume_options=1., view_layout='vertical',\n             add_data_kwargs=None, verbose=None):\n        brain = plot_source_estimates(\n            self, subject, surface=surface, hemi=hemi, colormap=colormap,\n            time_label=time_label, smoothing_steps=smoothing_steps,\n            transparent=transparent, alpha=alpha, time_viewer=time_viewer,\n            subjects_dir=subjects_dir, figure=figure, views=views,\n            colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n            background=background, foreground=foreground,\n            initial_time=initial_time, time_unit=time_unit, backend=backend,\n            spacing=spacing, title=title, show_traces=show_traces,\n            src=src, volume_options=volume_options, view_layout=view_layout,\n            add_data_kwargs=add_data_kwargs, verbose=verbose)\n        return brain\n\n    @property\n    def sfreq(self):\n        \"\"\"Sample rate of the data.\"\"\"\n        return 1. 
/ self.tstep\n\n    @property\n    def _n_vertices(self):\n        return sum(len(v) for v in self.vertices)\n\n    def _remove_kernel_sens_data_(self):\n        \"\"\"Remove kernel and sensor space data and compute self._data.\"\"\"\n        if self._kernel is not None or self._sens_data is not None:\n            self._kernel_removed = True\n            self._data = np.dot(self._kernel, self._sens_data)\n            self._kernel = None\n            self._sens_data = None\n\n    @fill_doc\n    def crop(self, tmin=None, tmax=None, include_tmax=True):\n        \"\"\"Restrict SourceEstimate to a time interval.\n\n        Parameters\n        ----------\n        tmin : float | None\n            The first time point in seconds. If None, the first available\n            time point is used.\n        tmax : float | None\n            The last time point in seconds. If None, the last available\n            time point is used.\n        %(include_tmax)s\n\n        Returns\n        -------\n        stc : instance of SourceEstimate\n            The cropped source estimate.\n        \"\"\"\n        mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,\n                          include_tmax=include_tmax)\n        self.tmin = self.times[np.where(mask)[0][0]]\n        if self._kernel is not None and self._sens_data is not None:\n            self._sens_data = self._sens_data[..., mask]\n        else:\n            self.data = self.data[..., mask]\n\n        return self  # return self for chaining methods\n\n    @verbose\n    def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,\n                 verbose=None):\n        \"\"\"Resample data.\n\n        If appropriate, an anti-aliasing filter is applied before resampling.\n        See :ref:`resampling-and-decimating` for more information.\n\n        Parameters\n        ----------\n        sfreq : float\n            New sample rate to use.\n        npad : int | str\n            Amount to pad the start and end of the data.\n            Can also be \"auto\" to use a padding that will result in\n            a power-of-two size (can be much faster).\n        window : str | tuple\n            Window to use in resampling. See :func:`scipy.signal.resample`.\n        %(n_jobs)s\n        %(verbose_meth)s\n\n        Returns\n        -------\n        stc : instance of SourceEstimate\n            The resampled source estimate.\n\n        Notes\n        -----\n        For some data, it may be more accurate to use npad=0 to reduce\n        artifacts. This is dataset dependent -- check your data!\n\n        Note that the sample rate of the original data is inferred from tstep.\n        \"\"\"\n        # resampling in sensor instead of source space gives a somewhat\n        # different result, so we don't allow it\n        self._remove_kernel_sens_data_()\n\n        o_sfreq = 1.0 / self.tstep\n        data = self.data\n        if data.dtype == np.float32:\n            data = data.astype(np.float64)\n        self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)\n\n        # adjust indirectly affected variables\n        self.tstep = 1.0 / sfreq\n        return self\n\n    @property\n    def data(self):\n        \"\"\"Numpy array of source estimate data.\"\"\"\n        if self._data is None:\n            # compute the solution the first time the data is accessed and\n            # remove the kernel and sensor data\n            self._remove_kernel_sens_data_()\n        return self._data\n\n    @data.setter\n    def data(self, value):\n        value = np.asarray(value)\n        if self._data is not None and value.ndim != self._data.ndim:\n            raise ValueError('Data array should have %d dimensions.' 
%\n self._data.ndim)\n n_verts = sum(len(v) for v in self.vertices)\n if value.shape[0] != n_verts:\n raise ValueError('The first dimension of the data array must '\n 'match the number of vertices (%d != %d)' %\n (value.shape[0], n_verts))\n self._data = value\n self._update_times()\n\n @property\n def shape(self):\n \"\"\"Shape of the data.\"\"\"\n if self._data is not None:\n return self._data.shape\n return (self._kernel.shape[0], self._sens_data.shape[1])\n\n @property\n def tmin(self):\n \"\"\"The first timestamp.\"\"\"\n return self._tmin\n\n @tmin.setter\n def tmin(self, value):\n self._tmin = float(value)\n self._update_times()\n\n @property\n def tstep(self):\n \"\"\"The change in time between two consecutive samples (1 / sfreq).\"\"\"\n return self._tstep\n\n @tstep.setter\n def tstep(self, value):\n if value <= 0:\n raise ValueError('.tstep must be greater than 0.')\n self._tstep = float(value)\n self._update_times()\n\n @property\n def times(self):\n \"\"\"A timestamp for each sample.\"\"\"\n return self._times\n\n @times.setter\n def times(self, value):\n raise ValueError('You cannot write to the .times attribute directly. '\n 'This property automatically updates whenever '\n '.tmin, .tstep or .data changes.')\n\n def _update_times(self):\n \"\"\"Update the times attribute after changing tmin, tmax, or tstep.\"\"\"\n self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))\n self._times.flags.writeable = False\n\n def __add__(self, a):\n \"\"\"Add source estimates.\"\"\"\n stc = self.copy()\n stc += a\n return stc\n\n def __iadd__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data += a.data\n else:\n self.data += a\n return self\n\n def mean(self):\n \"\"\"Make a summary stc file with mean over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n out = self.sum()\n out /= len(self.times)\n return out\n\n def sum(self):\n \"\"\"Make a summary stc file with sum over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n data = self.data\n tmax = self.tmin + self.tstep * data.shape[-1]\n tmin = (self.tmin + tmax) / 2.\n tstep = tmax - self.tmin\n sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),\n vertices=self.vertices, tmin=tmin,\n tstep=tstep, subject=self.subject)\n return sum_stc\n\n def __sub__(self, a):\n \"\"\"Subtract source estimates.\"\"\"\n stc = self.copy()\n stc -= a\n return stc\n\n def __isub__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data -= a.data\n else:\n self.data -= a\n return self\n\n def __truediv__(self, a): # noqa: D105\n return self.__div__(a)\n\n def __div__(self, a): # noqa: D105\n \"\"\"Divide source estimates.\"\"\"\n stc = self.copy()\n stc /= a\n return stc\n\n def __itruediv__(self, a): # noqa: D105\n return self.__idiv__(a)\n\n def __idiv__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data /= a.data\n else:\n self.data /= a\n return self\n\n def __mul__(self, a):\n \"\"\"Multiply source estimates.\"\"\"\n stc = self.copy()\n stc *= a\n return stc\n\n def __imul__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, 
a)\n self.data *= a.data\n else:\n self.data *= a\n return self\n\n def __pow__(self, a): # noqa: D105\n stc = self.copy()\n stc **= a\n return stc\n\n def __ipow__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n self.data **= a\n return self\n\n def __radd__(self, a): # noqa: D105\n return self + a\n\n def __rsub__(self, a): # noqa: D105\n return self - a\n\n def __rmul__(self, a): # noqa: D105\n return self * a\n\n def __rdiv__(self, a): # noqa: D105\n return self / a\n\n def __neg__(self): # noqa: D105\n \"\"\"Negate the source estimate.\"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc.data *= -1\n return stc\n\n def __pos__(self): # noqa: D105\n return self\n\n def __abs__(self):\n \"\"\"Compute the absolute value of the data.\n\n Returns\n -------\n stc : instance of _BaseSourceEstimate\n A version of the source estimate, where the data attribute is set\n to abs(self.data).\n \"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc._data = abs(stc._data)\n return stc\n\n def sqrt(self):\n \"\"\"Take the square root.\n\n Returns\n -------\n stc : instance of SourceEstimate\n A copy of the SourceEstimate with sqrt(data).\n \"\"\"\n return self ** (0.5)\n\n def copy(self):\n \"\"\"Return copy of source estimate instance.\n\n Returns\n -------\n stc : instance of SourceEstimate\n A copy of the source estimate.\n \"\"\"\n return copy.deepcopy(self)\n\n def bin(self, width, tstart=None, tstop=None, func=np.mean):\n \"\"\"Return a source estimate object with data summarized over time bins.\n\n Time bins of ``width`` seconds. This method is intended for\n visualization only. No filter is applied to the data before binning,\n making the method inappropriate as a tool for downsampling data.\n\n Parameters\n ----------\n width : scalar\n Width of the individual bins in seconds.\n tstart : scalar | None\n Time point where the first bin starts. The default is the first\n time point of the stc.\n tstop : scalar | None\n Last possible time point contained in a bin (if the last bin would\n be shorter than width it is dropped). The default is the last time\n point of the stc.\n func : callable\n Function that is applied to summarize the data. Needs to accept a\n numpy.array as first input and an ``axis`` keyword argument.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The binned source estimate.\n \"\"\"\n if tstart is None:\n tstart = self.tmin\n if tstop is None:\n tstop = self.times[-1]\n\n times = np.arange(tstart, tstop + self.tstep, width)\n nt = len(times) - 1\n data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)\n for i in range(nt):\n idx = (self.times >= times[i]) & (self.times < times[i + 1])\n data[..., i] = func(self.data[..., idx], axis=-1)\n\n tmin = times[0] + width / 2.\n stc = self.copy()\n stc._data = data\n stc.tmin = tmin\n stc.tstep = width\n return stc\n\n def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):\n \"\"\"Get data after a linear (time) transform has been applied.\n\n The transform is applied to each source time course independently.\n\n Parameters\n ----------\n func : callable\n The transform to be applied, including parameters (see, e.g.,\n :func:`functools.partial`). The first parameter of the function is\n the input data. The first return value is the transformed data,\n remaining outputs are ignored. 
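For example,\n            ``functools.partial(scipy.signal.detrend, axis=-1)`` would be a\n            valid transform (an illustrative sketch, not from the original\n            docs).\n            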
The first dimension of the\n            transformed data has to be the same as the first dimension of the\n            input data.\n        idx : array | None\n            Indices of source time courses for which to compute transform.\n            If None, all time courses are used.\n        tmin_idx : int | None\n            Index of first time point to include. If None, the index of the\n            first time point is used.\n        tmax_idx : int | None\n            Index of the first time point not to include. If None, time points\n            up to (and including) the last time point are included.\n\n        Returns\n        -------\n        data_t : ndarray\n            The transformed data.\n\n        Notes\n        -----\n        Applying transforms can be significantly faster if the\n        SourceEstimate object was created using \"(kernel, sens_data)\" for\n        the \"data\" parameter, as the transform is applied in sensor space.\n        Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n        do this automatically (if possible).\n        \"\"\"\n        if idx is None:\n            # use all time courses by default\n            idx = slice(None, None)\n\n        if self._kernel is None and self._sens_data is None:\n            if self._kernel_removed:\n                warn('Performance can be improved by not accessing the data '\n                     'attribute before calling this method.')\n\n            # transform source space data directly\n            data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])\n\n            if isinstance(data_t, tuple):\n                # use only first return value\n                data_t = data_t[0]\n        else:\n            # apply transform in sensor space\n            sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])\n\n            if isinstance(sens_data_t, tuple):\n                # use only first return value\n                sens_data_t = sens_data_t[0]\n\n            # apply inverse\n            data_shape = sens_data_t.shape\n            if len(data_shape) > 2:\n                # flatten the last dimensions\n                sens_data_t = sens_data_t.reshape(data_shape[0],\n                                                  np.prod(data_shape[1:]))\n\n            data_t = np.dot(self._kernel[idx, :], sens_data_t)\n\n            # restore original shape if necessary\n            if len(data_shape) > 2:\n                data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])\n\n        return data_t\n\n    def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):\n        \"\"\"Apply linear transform.\n\n        The transform is applied to each source time course independently.\n\n        Parameters\n        ----------\n        func : callable\n            The transform to be applied, including parameters (see, e.g.,\n            :func:`functools.partial`). The first parameter of the function is\n            the input data. The first two dimensions of the transformed data\n            should be (i) vertices and (ii) time. See Notes for details.\n        idx : array | None\n            Indices of source time courses for which to compute transform.\n            If None, all time courses are used.\n        tmin : float | int | None\n            First time point to include (ms). If None, self.tmin is used.\n        tmax : float | int | None\n            Last time point to include (ms). If None, self.tmax is used.\n        copy : bool\n            If True, return a new instance of SourceEstimate instead of\n            modifying the input inplace.\n\n        Returns\n        -------\n        stcs : SourceEstimate | VectorSourceEstimate | list\n            The transformed stc or, in the case of transforms which yield\n            N-dimensional output (where N > 2), a list of stcs. For a list,\n            copy must be True.\n\n        Notes\n        -----\n        Transforms which yield 3D\n        output (e.g. time-frequency transforms) are valid, so long as the\n        first two dimensions are vertices and time. In this case, the\n        copy parameter must be True and a list of\n        SourceEstimates, rather than a single instance of SourceEstimate,\n        will be returned, one for each index of the 3rd dimension of the\n        transformed data. In the case of transforms yielding 2D output\n        (e.g. 
filtering), the user has the option of modifying the input\n inplace (copy = False) or returning a new instance of\n SourceEstimate (copy = True) with the transformed data.\n\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n # min and max data indices to include\n times = 1000. * self.times\n t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]\n if tmin is None:\n tmin_idx = None\n else:\n tmin_idx = t_idx[0]\n\n if tmax is None:\n tmax_idx = None\n else:\n # +1, because upper boundary needs to include the last sample\n tmax_idx = t_idx[-1] + 1\n\n data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,\n tmax_idx=tmax_idx)\n\n # account for change in n_vertices\n if idx is not None:\n idx_lh = idx[idx < len(self.lh_vertno)]\n idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)\n verts_lh = self.lh_vertno[idx_lh]\n verts_rh = self.rh_vertno[idx_rh]\n else:\n verts_lh = self.lh_vertno\n verts_rh = self.rh_vertno\n verts = [verts_lh, verts_rh]\n\n tmin_idx = 0 if tmin_idx is None else tmin_idx\n tmin = self.times[tmin_idx]\n\n if data_t.ndim > 2:\n # return list of stcs if transformed data has dimensionality > 2\n if copy:\n stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,\n self.tstep, self.subject)\n for a in range(data_t.shape[-1])]\n else:\n raise ValueError('copy must be True if transformed data has '\n 'more than 2 dimensions')\n else:\n # return new or overwritten stc\n stcs = self if not copy else self.copy()\n stcs.vertices = verts\n stcs.data = data_t\n stcs.tmin = tmin\n\n return stcs\n\n @fill_doc\n def to_data_frame(self, index=None, scalings=None,\n long_format=False, time_format='ms'):\n \"\"\"Export data in tabular structure as a pandas DataFrame.\n\n Vertices are converted to columns in the DataFrame. By default,\n an additional column \"time\" is added, unless ``index='time'``\n (in which case time values form the DataFrame's index).\n\n Parameters\n ----------\n %(df_index_evk)s\n Defaults to ``None``.\n %(df_scalings)s\n %(df_longform_stc)s\n %(df_time_format)s\n\n .. 
versionadded:: 0.20\n\n Returns\n -------\n %(df_return)s\n \"\"\"\n # check pandas once here, instead of in each private utils function\n pd = _check_pandas_installed() # noqa\n # arg checking\n valid_index_args = ['time', 'subject']\n valid_time_formats = ['ms', 'timedelta']\n index = _check_pandas_index_arguments(index, valid_index_args)\n time_format = _check_time_format(time_format, valid_time_formats)\n # get data\n data = self.data.T\n times = self.times\n # prepare extra columns / multiindex\n mindex = list()\n default_index = ['time']\n if self.subject is not None:\n default_index = ['subject', 'time']\n mindex.append(('subject', np.repeat(self.subject, data.shape[0])))\n times = _convert_times(self, times, time_format)\n mindex.append(('time', times))\n # triage surface vs volume source estimates\n col_names = list()\n kinds = ['VOL'] * len(self.vertices)\n if isinstance(self, (_BaseSurfaceSourceEstimate,\n _BaseMixedSourceEstimate)):\n kinds[:2] = ['LH', 'RH']\n for ii, (kind, vertno) in enumerate(zip(kinds, self.vertices)):\n col_names.extend(['{}_{}'.format(kind, vert) for vert in vertno])\n # build DataFrame\n df = _build_data_frame(self, data, None, long_format, mindex, index,\n default_index=default_index,\n col_names=col_names, col_kind='source')\n return df\n\n\ndef _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,\n restrict_vertices):\n \"\"\"Find the center of mass on a surface.\"\"\"\n if (values == 0).all() or (values < 0).any():\n raise ValueError('All values must be non-negative and at least one '\n 'must be non-zero, cannot compute COM')\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n surf = read_surface(op.join(subjects_dir, subject, 'surf',\n hemi + '.' + surf))\n if restrict_vertices is True:\n restrict_vertices = vertices\n elif restrict_vertices is False:\n restrict_vertices = np.arange(surf[0].shape[0])\n elif isinstance(restrict_vertices, SourceSpaces):\n idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0\n restrict_vertices = restrict_vertices[idx]['vertno']\n else:\n restrict_vertices = np.array(restrict_vertices, int)\n pos = surf[0][vertices, :].T\n c_o_m = np.sum(pos * values, axis=1) / np.sum(values)\n vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -\n c_o_m) ** 2, axis=1)))\n vertex = restrict_vertices[vertex]\n return vertex\n\n\n@fill_doc\nclass _BaseSurfaceSourceEstimate(_BaseSourceEstimate):\n \"\"\"Abstract base class for surface source estimates.\n\n Parameters\n ----------\n data : array\n The data in source space.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n data : array\n The data in source space.\n shape : tuple\n The shape of the data. 
A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n _src_type = 'surface'\n _src_count = 2\n\n @property\n def lh_data(self):\n \"\"\"Left hemisphere data.\"\"\"\n return self.data[:len(self.lh_vertno)]\n\n @property\n def rh_data(self):\n \"\"\"Right hemisphere data.\"\"\"\n return self.data[len(self.lh_vertno):]\n\n @property\n def lh_vertno(self):\n \"\"\"Left hemisphere vertno.\"\"\"\n return self.vertices[0]\n\n @property\n def rh_vertno(self):\n \"\"\"Right hemisphere vertno.\"\"\"\n return self.vertices[1]\n\n def _hemilabel_stc(self, label):\n if label.hemi == 'lh':\n stc_vertices = self.vertices[0]\n else:\n stc_vertices = self.vertices[1]\n\n # find index of the Label's vertices\n idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]\n\n # find output vertices\n vertices = stc_vertices[idx]\n\n # find data\n if label.hemi == 'rh':\n values = self.data[idx + len(self.vertices[0])]\n else:\n values = self.data[idx]\n\n return vertices, values\n\n def in_label(self, label):\n \"\"\"Get a source estimate object restricted to a label.\n\n SourceEstimate contains the time course of\n activation of all sources inside the label.\n\n Parameters\n ----------\n label : Label | BiHemiLabel\n The label (as created for example by mne.read_label). If the label\n does not match any sources in the SourceEstimate, a ValueError is\n raised.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The source estimate restricted to the given label.\n \"\"\"\n # make sure label and stc are compatible\n from .label import Label, BiHemiLabel\n _validate_type(label, (Label, BiHemiLabel), 'label')\n if label.subject is not None and self.subject is not None \\\n and label.subject != self.subject:\n raise RuntimeError('label and stc must have same subject names, '\n 'currently \"%s\" and \"%s\"' % (label.subject,\n self.subject))\n\n if label.hemi == 'both':\n lh_vert, lh_val = self._hemilabel_stc(label.lh)\n rh_vert, rh_val = self._hemilabel_stc(label.rh)\n vertices = [lh_vert, rh_vert]\n values = np.vstack((lh_val, rh_val))\n elif label.hemi == 'lh':\n lh_vert, values = self._hemilabel_stc(label)\n vertices = [lh_vert, np.array([], int)]\n else:\n assert label.hemi == 'rh'\n rh_vert, values = self._hemilabel_stc(label)\n vertices = [np.array([], int), rh_vert]\n\n if sum([len(v) for v in vertices]) == 0:\n raise ValueError('No vertices match the label in the stc file')\n\n label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,\n tstep=self.tstep, subject=self.subject)\n return label_stc\n\n def expand(self, vertices):\n \"\"\"Expand SourceEstimate to include more vertices.\n\n This will add rows to stc.data (zero-filled) and modify stc.vertices\n to include all vertices in stc.vertices and the input vertices.\n\n Parameters\n ----------\n vertices : list of array\n New vertices to add. 
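For example,\n            ``stc.expand([np.arange(10), np.arange(10)])`` would zero-fill any\n            of the first ten vertices of either hemisphere that are missing\n            (illustrative values). 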
Can also contain old values.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc (note: method operates inplace).\n \"\"\"\n if not isinstance(vertices, list):\n raise TypeError('vertices must be a list')\n if not len(self.vertices) == len(vertices):\n raise ValueError('vertices must have the same length as '\n 'stc.vertices')\n\n # can no longer use kernel and sensor data\n self._remove_kernel_sens_data_()\n\n inserters = list()\n offsets = [0]\n for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):\n v_new = np.setdiff1d(v_new, v_old)\n inds = np.searchsorted(v_old, v_new)\n # newer numpy might overwrite inds after np.insert, copy here\n inserters += [inds.copy()]\n offsets += [len(v_old)]\n self.vertices[vi] = np.insert(v_old, inds, v_new)\n inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]\n inds = np.concatenate(inds)\n new_data = np.zeros((len(inds),) + self.data.shape[1:])\n self.data = np.insert(self.data, inds, new_data, axis=0)\n return self\n\n @verbose\n def to_original_src(self, src_orig, subject_orig=None,\n subjects_dir=None, verbose=None):\n \"\"\"Get a source estimate from morphed source to the original subject.\n\n Parameters\n ----------\n src_orig : instance of SourceSpaces\n The original source spaces that were morphed to the current\n subject.\n subject_orig : str | None\n The original subject. For most source spaces this shouldn't need\n to be provided, since it is stored in the source space itself.\n %(subjects_dir)s\n %(verbose_meth)s\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The transformed source estimate.\n\n See Also\n --------\n morph_source_spaces\n\n Notes\n -----\n .. versionadded:: 0.10.0\n \"\"\"\n if self.subject is None:\n raise ValueError('stc.subject must be set')\n src_orig = _ensure_src(src_orig, kind='surface')\n subject_orig = _ensure_src_subject(src_orig, subject_orig)\n data_idx, vertices = _get_morph_src_reordering(\n self.vertices, src_orig, subject_orig, self.subject, subjects_dir)\n return self.__class__(self._data[data_idx], vertices,\n self.tmin, self.tstep, subject_orig)\n\n @fill_doc\n def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',\n vert_as_index=False, time_as_index=False):\n \"\"\"Get location and latency of peak amplitude.\n\n Parameters\n ----------\n hemi : {'lh', 'rh', None}\n The hemi to be considered. If None, the entire source space is\n considered.\n %(get_peak_parameters)s\n\n Returns\n -------\n pos : int\n The vertex exhibiting the maximum response, either ID or index.\n latency : float | int\n The time point of the maximum response, either latency in seconds\n or index.\n \"\"\"\n _check_option('hemi', hemi, ('lh', 'rh', None))\n vertex_offset = 0\n if hemi is not None:\n if hemi == 'lh':\n data = self.lh_data\n vertices = [self.lh_vertno, []]\n else:\n vertex_offset = len(self.vertices[0])\n data = self.rh_data\n vertices = [[], self.rh_vertno]\n meth = self.__class__(\n data, vertices, self.tmin, self.tstep).get_peak\n else:\n meth = super().get_peak\n out = meth(tmin=tmin, tmax=tmax, mode=mode,\n vert_as_index=vert_as_index,\n time_as_index=time_as_index)\n if vertex_offset and vert_as_index:\n out = (out[0] + vertex_offset, out[1])\n return out\n\n\n@fill_doc\nclass SourceEstimate(_BaseSurfaceSourceEstimate):\n \"\"\"Container for surface source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. 
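(A hypothetical construction:\n        ``SourceEstimate(np.zeros((n_lh + n_rh, n_times)),\n        [lh_verts, rh_verts], tmin=0., tstep=1e-3, subject='sample')``; all\n        names are illustrative.) 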
When it is a single array, the\n left hemisphere is stored in data[:len(vertices[0])] and the right\n hemisphere is stored in data[-len(vertices[1]):].\n When data is a tuple, it contains two arrays:\n\n - \"kernel\" shape (n_vertices, n_sensors) and\n - \"sens_data\" shape (n_sensors, n_times).\n\n In this case, the source space data corresponds to\n ``np.dot(kernel, sens_data)``.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array, shape (2,)\n The indices of the dipoles in the left and right source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n \"\"\"\n\n @verbose\n def save(self, fname, ftype='stc', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : str\n The stem of the file name. The file names used for surface source\n spaces are obtained by adding \"-lh.stc\" and \"-rh.stc\" (or \"-lh.w\"\n and \"-rh.w\") to the stem provided, for the left and the right\n hemisphere, respectively.\n ftype : str\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". The \"w\" format only supports a single time point.\n %(verbose_meth)s\n \"\"\"\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n _check_option('ftype', ftype, ['stc', 'w', 'h5'])\n\n lh_data = self.data[:len(self.lh_vertno)]\n rh_data = self.data[-len(self.rh_vertno):]\n\n if ftype == 'stc':\n if np.iscomplexobj(self.data):\n raise ValueError(\"Cannot save complex-valued STC data in \"\n \"FIFF format; please set ftype='h5' to save \"\n \"in HDF5 format instead, or cast the data to \"\n \"real numbers before saving.\")\n logger.info('Writing STC to disk...')\n _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.lh_vertno, data=lh_data)\n _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.rh_vertno, data=rh_data)\n\n elif ftype == 'w':\n if self.shape[1] != 1:\n raise ValueError('w files can only contain a single time '\n 'point')\n logger.info('Writing STC to disk (w format)...')\n _write_w(fname + '-lh.w', vertices=self.lh_vertno,\n data=lh_data[:, 0])\n _write_w(fname + '-rh.w', vertices=self.rh_vertno,\n data=rh_data[:, 0])\n\n elif ftype == 'h5':\n super().save(fname)\n logger.info('[done]')\n\n @verbose\n def estimate_snr(self, info, fwd, cov, verbose=None):\n r\"\"\"Compute time-varying SNR in the source space.\n\n This function should only be used with source estimates with units\n nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).\n\n .. 
warning:: This function currently only works properly for fixed\n                     orientation.\n\n        Parameters\n        ----------\n        info : instance of Info\n            The measurement info.\n        fwd : instance of Forward\n            The forward solution used to create the source estimate.\n        cov : instance of Covariance\n            The noise covariance used to estimate the resting cortical\n            activations. Should be an evoked covariance, not empty room.\n        %(verbose)s\n\n        Returns\n        -------\n        snr_stc : instance of SourceEstimate\n            The source estimate with the SNR computed.\n\n        Notes\n        -----\n        We define the SNR in decibels for each source location at each\n        time point as:\n\n        .. math::\n\n            {\rm SNR} = 10\log_{10}\left[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}\right]\n\n        where :math:`b_k` is the signal on sensor :math:`k` provided by the\n        forward model for a source with unit amplitude, :math:`a` is the\n        source amplitude, :math:`N` is the number of sensors, and\n        :math:`s_k^2` is the noise variance on sensor :math:`k`.\n\n        References\n        ----------\n        .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,\n               D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).\n               Mapping the Signal-To-Noise-Ratios of Cortical Sources in\n               Magnetoencephalography and Electroencephalography.\n               Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571\n        \"\"\"\n        from .forward import convert_forward_solution, Forward\n        from .minimum_norm.inverse import _prepare_forward\n        _validate_type(fwd, Forward, 'fwd')\n        _validate_type(info, Info, 'info')\n        _validate_type(cov, Covariance, 'cov')\n        _check_stc_units(self)\n        if (self.data >= 0).all():\n            warn('This STC appears to be from free orientation, currently SNR'\n                 ' function is valid only for fixed orientation')\n\n        fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)\n\n        # G is gain matrix [ch x src], cov is noise covariance [ch x ch]\n        G, _, _, _, _, _, _, cov, _ = _prepare_forward(\n            fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,\n            use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',\n            allow_fixed_depth=False, limit=None)\n        G = G['sol']['data']\n        n_channels = cov['dim']  # number of sensors/channels\n        b_k2 = (G * G).T\n        s_k2 = np.diag(cov['data'])\n        scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)\n        snr_stc = self.copy()\n        snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)\n        return snr_stc\n\n    @fill_doc\n    def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,\n                       subjects_dir=None, surf='sphere'):\n        \"\"\"Compute the center of mass of activity.\n\n        This function computes the spatial center of mass on the surface\n        as well as the temporal center of mass as in [1]_.\n\n        .. note:: All activity must occur in a single hemisphere, otherwise\n                  an error is raised. The \"mass\" of each point in space for\n                  computing the spatial center of mass is computed by summing\n                  across time, and vice-versa for each point in time in\n                  computing the temporal center of mass. This is useful for\n                  quantifying spatio-temporal cluster locations, especially\n                  when combined with :func:`mne.vertex_to_mni`.\n\n        Parameters\n        ----------\n        subject : str | None\n            The subject the stc is defined for.\n        hemi : int | None\n            Calculate the center of mass for the left (0) or right (1)\n            hemisphere. 
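(For instance, a hypothetical purely\n            left-hemisphere cluster would use ``hemi=0``.) 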
If None, one of the hemispheres must be all zeroes,\n            and the center of mass will be calculated for the other\n            hemisphere (useful for getting COM for clusters).\n        restrict_vertices : bool | array of int | instance of SourceSpaces\n            If True, returned vertex will be one from stc. Otherwise, it could\n            be any vertex from surf. If an array of int, the returned vertex\n            will come from that array. If instance of SourceSpaces (as of\n            0.13), the returned vertex will be from the given source space.\n            For most accurate estimates, do not restrict vertices.\n        %(subjects_dir)s\n        surf : str\n            The surface to use for Euclidean distance center of mass\n            finding. The default here is \"sphere\", which finds the center\n            of mass on the spherical surface to help avoid potential issues\n            with cortical folding.\n\n        Returns\n        -------\n        vertex : int\n            Vertex of the spatial center of mass for the inferred hemisphere,\n            with each vertex weighted by the sum of the stc across time. For a\n            boolean stc, then, this would be weighted purely by the duration\n            each vertex was active.\n        hemi : int\n            Hemisphere the vertex was taken from.\n        t : float\n            Time of the temporal center of mass (weighted by the sum across\n            source vertices).\n\n        See Also\n        --------\n        mne.Label.center_of_mass\n        mne.vertex_to_mni\n\n        References\n        ----------\n        .. [1] Larson and Lee, \"The cortical dynamics underlying effective\n               switching of auditory spatial attention\", NeuroImage 2012.\n        \"\"\"\n        if not isinstance(surf, str):\n            raise TypeError('surf must be a string, got %s' % (type(surf),))\n        subject = _check_subject(self.subject, subject)\n        if np.any(self.data < 0):\n            raise ValueError('Cannot compute COM with negative values')\n        values = np.sum(self.data, axis=1)  # sum across time\n        vert_inds = [np.arange(len(self.vertices[0])),\n                     np.arange(len(self.vertices[1])) + len(self.vertices[0])]\n        if hemi is None:\n            hemi = np.where(np.array([np.sum(values[vi])\n                                      for vi in vert_inds]))[0]\n            if not len(hemi) == 1:\n                raise ValueError('Could not infer hemisphere')\n            hemi = hemi[0]\n        _check_option('hemi', hemi, [0, 1])\n        vertices = self.vertices[hemi]\n        values = values[vert_inds[hemi]]  # left or right\n        del vert_inds\n        vertex = _center_of_mass(\n            vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,\n            subject=subject, subjects_dir=subjects_dir,\n            restrict_vertices=restrict_vertices)\n        # do time center of mass by using the values across space\n        masses = np.sum(self.data, axis=0).astype(float)\n        t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)\n        t = self.tmin + self.tstep * t_ind\n        return vertex, hemi, t\n\n\nclass _BaseVectorSourceEstimate(_BaseSourceEstimate):\n\n    _data_ndim = 3\n\n    @verbose\n    def __init__(self, data, vertices=None, tmin=None, tstep=None,\n                 subject=None, verbose=None):  # noqa: D102\n        assert hasattr(self, '_scalar_class')\n        super().__init__(data, vertices, tmin, tstep, subject, verbose)\n\n    def magnitude(self):\n        \"\"\"Compute magnitude of activity without directionality.\n\n        Returns\n        -------\n        stc : instance of SourceEstimate\n            The source estimate without directionality information.\n        \"\"\"\n        data_mag = np.linalg.norm(self.data, axis=1)\n        return self._scalar_class(\n            data_mag, self.vertices, self.tmin, self.tstep, self.subject,\n            self.verbose)\n\n    def _get_src_normals(self, src, use_cps):\n        normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in\n                             zip(src, self.vertices)])\n        return normals\n\n    @fill_doc\n    def project(self, directions, src=None, use_cps=True):\n        \"\"\"Project the data for each vertex in a given 
direction.\n\n Parameters\n ----------\n directions : ndarray, shape (n_vertices, 3) | str\n Can be:\n\n - ``'normal'``\n Project onto the source space normals.\n - ``'pca'``\n SVD will be used to project onto the direction of maximal\n power for each source.\n - :class:`~numpy.ndarray`, shape (n_vertices, 3)\n Projection directions for each source.\n src : instance of SourceSpaces | None\n The source spaces corresponding to the source estimate.\n Not used when ``directions`` is an array, optional when\n ``directions='pca'``.\n %(use_cps)s\n Should be the same value that was used when the forward model\n was computed (typically True).\n\n Returns\n -------\n stc : instance of SourceEstimate\n The projected source estimate.\n directions : ndarray, shape (n_vertices, 3)\n The directions that were computed (or just used).\n\n Notes\n -----\n When using SVD, there is a sign ambiguity for the direction of maximal\n power. When ``src is None``, the direction is chosen that makes the\n resulting time waveform sum positive (i.e., have positive amplitudes).\n When ``src`` is provided, the directions are flipped in the direction\n of the source normals, i.e., outward from cortex for surface source\n spaces and in the +Z / superior direction for volume source spaces.\n\n .. versionadded:: 0.21\n \"\"\"\n _validate_type(directions, (str, np.ndarray), 'directions')\n _validate_type(src, (None, SourceSpaces), 'src')\n if isinstance(directions, str):\n _check_option('directions', directions, ('normal', 'pca'),\n extra='when str')\n\n if directions == 'normal':\n if src is None:\n raise ValueError(\n 'If directions=\"normal\", src cannot be None')\n _check_src_normal('normal', src)\n directions = self._get_src_normals(src, use_cps)\n else:\n assert directions == 'pca'\n x = self.data\n if not np.isrealobj(self.data):\n _check_option('stc.data.dtype', self.data.dtype,\n (np.complex64, np.complex128))\n dtype = \\\n np.float32 if x.dtype == np.complex64 else np.float64\n x = x.view(dtype)\n assert x.shape[-1] == 2 * self.data.shape[-1]\n u, _, v = np.linalg.svd(x, full_matrices=False)\n directions = u[:, :, 0]\n # The sign is arbitrary, so let's flip it in the direction that\n # makes the resulting time series the most positive:\n if src is None:\n signs = np.sum(v[:, 0].real, axis=1, keepdims=True)\n else:\n normals = self._get_src_normals(src, use_cps)\n signs = np.sum(directions * normals, axis=1, keepdims=True)\n assert signs.shape == (self.data.shape[0], 1)\n signs = np.sign(signs)\n signs[signs == 0] = 1.\n directions *= signs\n _check_option(\n 'directions.shape', directions.shape, [(self.data.shape[0], 3)])\n data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0]\n stc = self._scalar_class(\n data_norm, self.vertices, self.tmin, self.tstep, self.subject,\n self.verbose)\n return stc, directions\n\n @copy_function_doc_to_method_doc(plot_vector_source_estimates)\n def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',\n smoothing_steps=10, transparent=True, brain_alpha=0.4,\n overlay_alpha=None, vector_alpha=1.0, scale_factor=None,\n time_viewer='auto', subjects_dir=None, figure=None,\n views='lateral',\n colorbar=True, clim='auto', cortex='classic', size=800,\n background='black', foreground=None, initial_time=None,\n time_unit='s', show_traces='auto', src=None, volume_options=1.,\n view_layout='vertical', add_data_kwargs=None,\n verbose=None): # noqa: D102\n return plot_vector_source_estimates(\n self, subject=subject, hemi=hemi, colormap=colormap,\n 
time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, brain_alpha=brain_alpha,\n overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,\n scale_factor=scale_factor, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit,\n show_traces=show_traces, src=src, volume_options=volume_options,\n view_layout=view_layout, add_data_kwargs=add_data_kwargs,\n verbose=verbose)\n\n\nclass _BaseVolSourceEstimate(_BaseSourceEstimate):\n\n _src_type = 'volume'\n _src_count = None\n\n @copy_function_doc_to_method_doc(plot_source_estimates)\n def plot_3d(self, subject=None, surface='white', hemi='both',\n colormap='auto', time_label='auto', smoothing_steps=10,\n transparent=True, alpha=0.1, time_viewer='auto',\n subjects_dir=None,\n figure=None, views='axial', colorbar=True, clim='auto',\n cortex=\"classic\", size=800, background=\"black\",\n foreground=None, initial_time=None, time_unit='s',\n backend='auto', spacing='oct6', title=None, show_traces='auto',\n src=None, volume_options=1., view_layout='vertical',\n add_data_kwargs=None, verbose=None):\n return super().plot(\n subject=subject, surface=surface, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha, time_viewer=time_viewer,\n subjects_dir=subjects_dir,\n figure=figure, views=views, colorbar=colorbar, clim=clim,\n cortex=cortex, size=size, background=background,\n foreground=foreground, initial_time=initial_time,\n time_unit=time_unit, backend=backend, spacing=spacing, title=title,\n show_traces=show_traces, src=src, volume_options=volume_options,\n view_layout=view_layout, add_data_kwargs=add_data_kwargs,\n verbose=verbose)\n\n @copy_function_doc_to_method_doc(plot_volume_source_estimates)\n def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',\n bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto',\n transparent='auto', show=True, initial_time=None,\n initial_pos=None, verbose=None):\n data = self.magnitude() if self._data_ndim == 3 else self\n return plot_volume_source_estimates(\n data, src=src, subject=subject, subjects_dir=subjects_dir,\n mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,\n clim=clim, transparent=transparent, show=show,\n initial_time=initial_time, initial_pos=initial_pos,\n verbose=verbose)\n\n # Override here to provide the volume-specific options\n @verbose\n def extract_label_time_course(self, labels, src, mode='auto',\n allow_empty=False, *,\n mri_resolution=True, verbose=None):\n \"\"\"Extract label time courses for lists of labels.\n\n This function will extract one time course for each label. 
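As a hedged sketch\n        (assuming ``labels`` and a matching ``src`` already exist):\n        ``tc = stc.extract_label_time_course(labels, src, mode='mean')``.\n        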
The way the\n        time courses are extracted depends on the mode parameter.\n\n        Parameters\n        ----------\n        %(eltc_labels)s\n        %(eltc_src)s\n        %(eltc_mode)s\n        %(eltc_allow_empty)s\n        %(eltc_mri_resolution)s\n        %(verbose_meth)s\n\n        Returns\n        -------\n        %(eltc_returns)s\n\n        See Also\n        --------\n        extract_label_time_course : Extract time courses for multiple STCs.\n\n        Notes\n        -----\n        %(eltc_mode_notes)s\n        \"\"\"\n        return extract_label_time_course(\n            self, labels, src, mode=mode, return_generator=False,\n            allow_empty=allow_empty,\n            mri_resolution=mri_resolution, verbose=verbose)\n\n    @verbose\n    def in_label(self, label, mri, src, *, verbose=None):\n        \"\"\"Get a source estimate object restricted to a label.\n\n        SourceEstimate contains the time course of\n        activation of all sources inside the label.\n\n        Parameters\n        ----------\n        label : str | int\n            The label to use. Can be the name of a label if using a standard\n            FreeSurfer atlas, or an integer value to extract from the ``mri``.\n        mri : str\n            Path to the atlas to use.\n        src : instance of SourceSpaces\n            The volumetric source space. It must be a single, whole-brain\n            volume.\n        %(verbose_meth)s\n\n        Returns\n        -------\n        stc : VolSourceEstimate | VolVectorSourceEstimate\n            The source estimate restricted to the given label.\n\n        Notes\n        -----\n        .. versionadded:: 0.21.0\n        \"\"\"\n        if len(self.vertices) != 1:\n            raise RuntimeError('This method can only be used with whole-brain '\n                               'volume source spaces')\n        _validate_type(label, (str, 'int-like'), 'label')\n        if isinstance(label, str):\n            volume_label = [label]\n        else:\n            volume_label = {'Volume ID %s' % (label): _ensure_int(label)}\n        label = _volume_labels(src, (mri, volume_label), mri_resolution=False)\n        assert len(label) == 1\n        label = label[0]\n        vertices = label.vertices\n        keep = np.in1d(self.vertices[0], label.vertices)\n        values, vertices = self.data[keep], [self.vertices[0][keep]]\n        label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,\n                                   tstep=self.tstep, subject=self.subject)\n        return label_stc\n\n    def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,\n                       format='nifti1'):\n        \"\"\"Save a volume source estimate in a NIfTI file.\n\n        Parameters\n        ----------\n        fname : str\n            The name of the generated nifti file.\n        src : list\n            The list of source spaces (should all be of type volume).\n        dest : 'mri' | 'surf'\n            If 'mri' the volume is defined in the coordinate system of\n            the original T1 image. If 'surf' the coordinate system\n            of the FreeSurfer surface is used (Surface RAS).\n        mri_resolution : bool\n            If True the image is saved in MRI resolution.\n\n            .. warning:: If you have many time points, the file produced can be\n                         huge.\n        format : str\n            Either 'nifti1' (default) or 'nifti2'.\n\n            .. versionadded:: 0.17\n\n        Returns\n        -------\n        img : instance of Nifti1Image\n            The image object.\n\n        Notes\n        -----\n        .. versionadded:: 0.9.0\n        \"\"\"\n        import nibabel as nib\n        _validate_type(fname, 'path-like', 'fname')\n        fname = str(fname)\n        img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,\n                             format=format)\n        nib.save(img, fname)\n\n    def as_volume(self, src, dest='mri', mri_resolution=False,\n                  format='nifti1'):\n        \"\"\"Export volume source estimate as a nifti object.\n\n        Parameters\n        ----------\n        src : instance of SourceSpaces\n            The source spaces (should all be of type volume, or part of a\n            mixed source space).\n        dest : 'mri' | 'surf'\n            If 'mri' the volume is defined in the coordinate system of\n            the original T1 image. If 'surf' the coordinate system\n            of the FreeSurfer surface is used (Surface RAS).\n        mri_resolution : bool\n            If True the image is saved in MRI resolution.\n\n            .. warning:: If you have many time points, the file produced can be\n                         huge.\n        format : str\n            Either 'nifti1' (default) or 'nifti2'.\n\n        Returns\n        -------\n        img : instance of Nifti1Image\n            The image object.\n\n        Notes\n        -----\n        .. versionadded:: 0.9.0\n\n        
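Examples\n        --------\n        A hedged sketch (``stc`` and a matching volumetric ``src`` are assumed\n        to exist; the output file name is hypothetical)::\n\n            >>> img = stc.as_volume(src, mri_resolution=True)  # doctest: +SKIP\n            >>> import nibabel as nib  # doctest: +SKIP\n            >>> nib.save(img, 'stc_volume.nii.gz')  # doctest: +SKIP\n        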
\"\"\"\n        from .morph import _interpolate_data\n        data = self.magnitude() if self._data_ndim == 3 else self\n        return _interpolate_data(data, src, mri_resolution=mri_resolution,\n                                 mri_space=True, output=format)\n\n\n@fill_doc\nclass VolSourceEstimate(_BaseVolSourceEstimate):\n    \"\"\"Container for volume source estimates.\n\n    Parameters\n    ----------\n    data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n        The data in source space. The data can either be a single array or\n        a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n        \"sens_data\" shape (n_sensors, n_times). In this case, the source\n        space data corresponds to ``np.dot(kernel, sens_data)``.\n    vertices : array of shape (n_dipoles,)\n        The indices of the dipoles in the source space.\n    tmin : scalar\n        Time point of the first sample in data.\n    tstep : scalar\n        Time step between successive samples in data.\n    subject : str | None\n        The subject name. While not necessary, it is safer to set the\n        subject parameter to avoid analysis errors.\n    %(verbose)s\n\n    Attributes\n    ----------\n    subject : str | None\n        The subject name.\n    times : array of shape (n_times,)\n        The time vector.\n    vertices : array of shape (n_dipoles,)\n        The indices of the dipoles in the source space.\n    data : array of shape (n_dipoles, n_times)\n        The data in source space.\n    shape : tuple\n        The shape of the data. A tuple of int (n_dipoles, n_times).\n\n    See Also\n    --------\n    SourceEstimate : A container for surface source estimates.\n    VolVectorSourceEstimate : A container for volume vector source estimates.\n    MixedSourceEstimate : A container for mixed surface + volume source\n        estimates.\n\n    Notes\n    -----\n    .. versionadded:: 0.9.0\n    \"\"\"\n\n    @verbose\n    def save(self, fname, ftype='stc', verbose=None):\n        \"\"\"Save the source estimates to a file.\n\n        Parameters\n        ----------\n        fname : str\n            The stem of the file name. The stem is extended with \"-vl.stc\"\n            or \"-vl.w\".\n        ftype : str\n            File format to use. Allowed values are \"stc\" (default), \"w\",\n            and \"h5\". 
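(For example, with the\n            default ftype, ``stc.save('sample')`` would write\n            ``sample-vl.stc``; the stem is hypothetical.) 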
The \"w\" format only supports a single time point.\n %(verbose_meth)s\n \"\"\"\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n _check_option('ftype', ftype, ['stc', 'w', 'h5'])\n if ftype != 'h5' and len(self.vertices) != 1:\n raise ValueError('Can only write to .stc or .w if a single volume '\n 'source space was used, use .h5 instead')\n if ftype != 'h5' and self.data.dtype == 'complex':\n raise ValueError('Can only write non-complex data to .stc or .w'\n ', use .h5 instead')\n if ftype == 'stc':\n logger.info('Writing STC to disk...')\n if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):\n fname += '-vl.stc'\n _write_stc(fname, tmin=self.tmin, tstep=self.tstep,\n vertices=self.vertices[0], data=self.data)\n elif ftype == 'w':\n logger.info('Writing STC to disk (w format)...')\n if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):\n fname += '-vl.w'\n _write_w(fname, vertices=self.vertices[0], data=self.data)\n elif ftype == 'h5':\n super().save(fname, 'h5')\n logger.info('[done]')\n\n\n@fill_doc\nclass VolVectorSourceEstimate(_BaseVolSourceEstimate,\n _BaseVectorSourceEstimate):\n \"\"\"Container for volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, 3, n_times)\n The data in source space. Each dipole contains three vectors that\n denote the dipole strength in X, Y and Z directions over time.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n\n _scalar_class = VolSourceEstimate\n\n # defaults differ: hemi='both', views='axial'\n @copy_function_doc_to_method_doc(plot_vector_source_estimates)\n def plot_3d(self, subject=None, hemi='both', colormap='hot',\n time_label='auto',\n smoothing_steps=10, transparent=True, brain_alpha=0.4,\n overlay_alpha=None, vector_alpha=1.0, scale_factor=None,\n time_viewer='auto', subjects_dir=None, figure=None,\n views='axial',\n colorbar=True, clim='auto', cortex='classic', size=800,\n background='black', foreground=None, initial_time=None,\n time_unit='s', show_traces='auto', src=None,\n volume_options=1., view_layout='vertical',\n add_data_kwargs=None, verbose=None): # noqa: D102\n return _BaseVectorSourceEstimate.plot(\n self, subject=subject, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, brain_alpha=brain_alpha,\n overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,\n scale_factor=scale_factor, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit,\n show_traces=show_traces, src=src, volume_options=volume_options,\n view_layout=view_layout, add_data_kwargs=add_data_kwargs,\n verbose=verbose)\n\n\n@fill_doc\nclass VectorSourceEstimate(_BaseVectorSourceEstimate,\n _BaseSurfaceSourceEstimate):\n \"\"\"Container for vector surface source estimates.\n\n For each vertex, the magnitude of the current is defined in the X, Y and Z\n directions.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, 3, n_times)\n The data in source space. Each dipole contains three vectors that\n denote the dipole strength in X, Y and Z directions over time.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : float\n Time point of the first sample in data.\n tstep : float\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n\n Notes\n -----\n .. 
versionadded:: 0.15\n \"\"\"\n\n _scalar_class = SourceEstimate\n\n\n###############################################################################\n# Mixed source estimate (two cortical surfs plus other stuff)\n\nclass _BaseMixedSourceEstimate(_BaseSourceEstimate):\n\n _src_type = 'mixed'\n _src_count = None\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n if not isinstance(vertices, list) or len(vertices) < 2:\n raise ValueError('Vertices must be a list of numpy arrays with '\n 'one array per source space.')\n super().__init__(data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @property\n def _n_surf_vert(self):\n return sum(len(v) for v in self.vertices[:2])\n\n def surface(self):\n \"\"\"Return the cortical surface source estimate.\n\n Returns\n -------\n stc : instance of SourceEstimate or VectorSourceEstimate\n The surface source estimate.\n \"\"\"\n if self._data_ndim == 3:\n klass = VectorSourceEstimate\n else:\n klass = SourceEstimate\n return klass(\n self.data[:self._n_surf_vert], self.vertices[:2],\n self.tmin, self.tstep, self.subject, self.verbose)\n\n def volume(self):\n \"\"\"Return the volume source estimate.\n\n Returns\n -------\n stc : instance of VolSourceEstimate or VolVectorSourceEstimate\n The volume source estimate.\n \"\"\"\n if self._data_ndim == 3:\n klass = VolVectorSourceEstimate\n else:\n klass = VolSourceEstimate\n return klass(\n self.data[self._n_surf_vert:], self.vertices[2:],\n self.tmin, self.tstep, self.subject, self.verbose)\n\n\n@fill_doc\nclass MixedSourceEstimate(_BaseMixedSourceEstimate):\n \"\"\"Container for mixed surface and volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to ``np.dot(kernel, sens_data)``.\n vertices : list of array\n Vertex numbers corresponding to the data. The list contains arrays\n with one array per source space.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array\n Vertex numbers corresponding to the data. The list contains arrays\n with one array per source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n VolVectorSourceEstimate : A container for volume vector source estimates.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n\n\n@fill_doc\nclass MixedVectorSourceEstimate(_BaseVectorSourceEstimate,\n _BaseMixedSourceEstimate):\n \"\"\"Container for mixed vector source estimates.\n\n Parameters\n ----------\n data : array, shape (n_dipoles, 3, n_times)\n The data in source space. 
Each dipole contains three vectors that\n denote the dipole strength in X, Y and Z directions over time.\n vertices : list of array, shape (n_src,)\n Vertex numbers corresponding to the data.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array, shape (n_times,)\n The time vector.\n vertices : array of shape (n_dipoles,)\n The indices of the dipoles in the source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n\n Notes\n -----\n .. versionadded:: 0.21.0\n \"\"\"\n\n _scalar_class = MixedSourceEstimate\n\n\n###############################################################################\n# Morphing\n\ndef _get_vol_mask(src):\n \"\"\"Get the volume source space mask.\"\"\"\n assert len(src) == 1 # not a mixed source space\n shape = src[0]['shape'][::-1]\n mask = np.zeros(shape, bool)\n mask.flat[src[0]['vertno']] = True\n return mask\n\n\ndef _spatio_temporal_src_adjacency_vol(src, n_times):\n from sklearn.feature_extraction import grid_to_graph\n mask = _get_vol_mask(src)\n edges = grid_to_graph(*mask.shape, mask=mask)\n adjacency = _get_adjacency_from_edges(edges, n_times)\n return adjacency\n\n\ndef _spatio_temporal_src_adjacency_surf(src, n_times):\n if src[0]['use_tris'] is None:\n # XXX It would be nice to support non oct source spaces too...\n raise RuntimeError(\"The source space does not appear to be an ico \"\n \"surface. adjacency cannot be extracted from\"\n \" non-ico source spaces.\")\n used_verts = [np.unique(s['use_tris']) for s in src]\n offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]\n tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off\n for u_v, s, off in zip(used_verts, src, offs)])\n adjacency = spatio_temporal_tris_adjacency(tris, n_times)\n\n # deal with source space only using a subset of vertices\n masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]\n if sum(u.size for u in used_verts) != adjacency.shape[0] / n_times:\n raise ValueError('Used vertices do not match adjacency shape')\n if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:\n raise ValueError('Vertex mask does not match number of vertices')\n masks = np.concatenate(masks)\n missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)\n if missing:\n warn('%0.1f%% of original source space vertices have been'\n ' omitted, tri-based adjacency will have holes.\\n'\n 'Consider using distance-based adjacency or '\n 'morphing data to all source space vertices.' % missing)\n masks = np.tile(masks, n_times)\n masks = np.where(masks)[0]\n adjacency = adjacency.tocsr()\n adjacency = adjacency[masks]\n adjacency = adjacency[:, masks]\n # return to original format\n adjacency = adjacency.tocoo()\n return adjacency\n\n\n@verbose\ndef spatio_temporal_src_adjacency(src, n_times, dist=None, verbose=None):\n \"\"\"Compute adjacency for a source space activation over time.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. 
It can be a surface source space or a\n volume source space.\n n_times : int\n Number of time instants.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. If None, immediate neighbors\n are extracted from an ico surface.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the first N nodes in the graph are the\n vertices at time 1, the nodes from N+1 to 2N are the vertices\n at time 2, etc.\n \"\"\"\n # XXX we should compute adjacency for each source space and then\n # use scipy.sparse.block_diag to concatenate them\n if src[0]['type'] == 'vol':\n if dist is not None:\n raise ValueError('dist must be None for a volume '\n 'source space. Got %s.' % dist)\n\n adjacency = _spatio_temporal_src_adjacency_vol(src, n_times)\n elif dist is not None:\n # use distances computed and saved in the source space file\n adjacency = spatio_temporal_dist_adjacency(src, n_times, dist)\n else:\n adjacency = _spatio_temporal_src_adjacency_surf(src, n_times)\n return adjacency\n\n\n@verbose\ndef grade_to_tris(grade, verbose=None):\n \"\"\"Get tris defined for a certain grade.\n\n Parameters\n ----------\n grade : int\n Grade of an icosahedral mesh.\n %(verbose)s\n\n Returns\n -------\n tris : list\n 2-element list containing Nx3 arrays of tris, suitable for use in\n spatio_temporal_tris_adjacency.\n \"\"\"\n a = _get_ico_tris(grade, None, False)\n tris = np.concatenate((a, a + (np.max(a) + 1)))\n return tris\n\n\n@verbose\ndef spatio_temporal_tris_adjacency(tris, n_times, remap_vertices=False,\n verbose=None):\n \"\"\"Compute adjacency from triangles and time instants.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n n_times : int\n Number of time points.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. Defaults to False.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the first N nodes in the graph are the\n vertices at time 1, the nodes from N+1 to 2N are the vertices\n at time 2, etc.\n \"\"\"\n if remap_vertices:\n logger.info('Reassigning vertex indices.')\n tris = np.searchsorted(np.unique(tris), tris)\n\n edges = mesh_edges(tris)\n edges = (edges + sparse.eye(edges.shape[0], format='csr')).tocoo()\n return _get_adjacency_from_edges(edges, n_times)\n\n\n@verbose\ndef spatio_temporal_dist_adjacency(src, n_times, dist, verbose=None):\n \"\"\"Compute adjacency from distances in a source space and time instants.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained\n with a call to :func:`mne.setup_source_space` with the\n ``add_dist=True`` option.\n n_times : int\n Number of time points.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatio-temporal\n graph structure. 
If N is the number of vertices in the\n source space, the first N nodes in the graph are the\n vertices at time 1, the nodes from N+1 to 2N are the vertices\n at time 2, etc.\n \"\"\"\n if src[0]['dist'] is None:\n raise RuntimeError('src must have distances included, consider using '\n 'setup_source_space with add_dist=True')\n blocks = [s['dist'][s['vertno'], :][:, s['vertno']] for s in src]\n # Ensure we keep explicit zeros; deal with changes in SciPy\n for block in blocks:\n if isinstance(block, np.ndarray):\n block[block == 0] = -np.inf\n else:\n block.data[block.data == 0] = -1\n edges = sparse_block_diag(blocks)\n edges.data[:] = np.less_equal(edges.data, dist)\n # clean it up and put it in coo format\n edges = edges.tocsr()\n edges.eliminate_zeros()\n edges = edges.tocoo()\n return _get_adjacency_from_edges(edges, n_times)\n\n\n@verbose\ndef spatial_src_adjacency(src, dist=None, verbose=None):\n \"\"\"Compute adjacency for a source space activation.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. If None, immediate neighbors\n are extracted from an ico surface.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_src_adjacency(src, 1, dist)\n\n\n@verbose\ndef spatial_tris_adjacency(tris, remap_vertices=False, verbose=None):\n \"\"\"Compute adjacency from triangles.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. Defaults to False.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_tris_adjacency(tris, 1, remap_vertices)\n\n\n@verbose\ndef spatial_dist_adjacency(src, dist, verbose=None):\n \"\"\"Compute adjacency from distances in a source space.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained\n with a call to :func:`mne.setup_source_space` with the\n ``add_dist=True`` option.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_dist_adjacency(src, 1, dist)\n\n\n@verbose\ndef spatial_inter_hemi_adjacency(src, dist, verbose=None):\n \"\"\"Get vertices on each hemisphere that are close to the other hemisphere.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. Must be surface type.\n dist : float\n Maximal Euclidean distance (in m) between vertices in one hemisphere\n compared to the other to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n adjacency : ~scipy.sparse.coo_matrix\n The adjacency matrix describing the spatial graph structure.\n Typically this should be combined (additively) with another\n existing intra-hemispheric adjacency matrix, e.g. 
computed\n using geodesic distances.\n \"\"\"\n from scipy.spatial.distance import cdist\n src = _ensure_src(src, kind='surface')\n adj = cdist(src[0]['rr'][src[0]['vertno']],\n src[1]['rr'][src[1]['vertno']])\n adj = sparse.csr_matrix(adj <= dist, dtype=int)\n empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in adj.shape]\n adj = sparse.vstack([sparse.hstack([empties[0], adj]),\n sparse.hstack([adj.T, empties[1]])])\n return adj\n\n\n@verbose\ndef _get_adjacency_from_edges(edges, n_times, verbose=None):\n \"\"\"Given edges sparse matrix, create adjacency matrix.\"\"\"\n n_vertices = edges.shape[0]\n logger.info(\"-- number of adjacent vertices : %d\" % n_vertices)\n nnz = edges.col.size\n aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz))\n col = (edges.col[None, :] + aux).ravel()\n row = (edges.row[None, :] + aux).ravel()\n if n_times > 1: # add temporal edges\n o = (n_vertices * np.arange(n_times - 1)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n d = (n_vertices * np.arange(1, n_times)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n row = np.concatenate((row, o, d))\n col = np.concatenate((col, d, o))\n data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),\n dtype=np.int64)\n adjacency = coo_matrix((data, (row, col)),\n shape=(n_times * n_vertices,) * 2)\n return adjacency\n\n\n@verbose\ndef _get_ico_tris(grade, verbose=None, return_surf=False):\n \"\"\"Get triangles for ico surface.\"\"\"\n ico = _get_ico_surface(grade)\n if not return_surf:\n return ico['tris']\n else:\n return ico\n\n\ndef _pca_flip(flip, data):\n U, s, V = linalg.svd(data, full_matrices=False)\n # determine sign-flip\n sign = np.sign(np.dot(U[:, 0], flip))\n # use average power in label for scaling\n scale = linalg.norm(s) / np.sqrt(len(data))\n return sign * scale * V[0]\n\n\n_label_funcs = {\n 'mean': lambda flip, data: np.mean(data, axis=0),\n 'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),\n 'max': lambda flip, data: np.max(np.abs(data), axis=0),\n 'pca_flip': _pca_flip,\n}\n\n\[email protected]\ndef _temporary_vertices(src, vertices):\n orig_vertices = [s['vertno'] for s in src]\n for s, v in zip(src, vertices):\n s['vertno'] = v\n try:\n yield\n finally:\n for s, v in zip(src, orig_vertices):\n s['vertno'] = v\n\n\ndef _check_stc_src(stc, src):\n if stc is not None and src is not None:\n for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):\n n_missing = (~np.in1d(v, s['vertno'])).sum()\n if n_missing:\n raise ValueError('%d/%d %s hemisphere stc vertices '\n 'missing from the source space, likely '\n 'mismatch' % (n_missing, len(v), hemi))\n\n\ndef _prepare_label_extraction(stc, labels, src, mode, allow_empty, use_sparse):\n \"\"\"Prepare indices and flips for extract_label_time_course.\"\"\"\n # If src is a mixed src space, the first 2 src spaces are surf type and\n # the other ones are vol type. For mixed source space n_labels will be\n # given by the number of ROIs of the cortical parcellation plus the number\n # of vol src space.\n # If stc=None (i.e. 
no activation time courses provided) and mode='mean',\n # only computes vertex indices and label_flip will be list of None.\n from .label import label_sign_flip, Label, BiHemiLabel\n\n # if source estimate provided in stc, get vertices from source space and\n # check that they are the same as in the stcs\n _check_stc_src(stc, src)\n vertno = [s['vertno'] for s in src] if stc is None else stc.vertices\n nvert = [len(vn) for vn in vertno]\n\n # initialization\n label_flip = list()\n label_vertidx = list()\n\n bad_labels = list()\n for li, label in enumerate(labels):\n if use_sparse:\n assert isinstance(label, dict)\n vertidx = label['csr']\n # This can happen if some labels aren't present in the space\n if vertidx.shape[0] == 0:\n bad_labels.append(label['name'])\n vertidx = None\n # Efficiency shortcut: use linearity early to avoid redundant\n # calculations\n elif mode == 'mean':\n vertidx = sparse.csr_matrix(vertidx.mean(axis=0))\n label_vertidx.append(vertidx)\n label_flip.append(None)\n continue\n # standard case\n _validate_type(label, (Label, BiHemiLabel), 'labels[%d]' % (li,))\n\n if label.hemi == 'both':\n # handle BiHemiLabel\n sub_labels = [label.lh, label.rh]\n else:\n sub_labels = [label]\n this_vertidx = list()\n for slabel in sub_labels:\n if slabel.hemi == 'lh':\n this_vertices = np.intersect1d(vertno[0], slabel.vertices)\n vertidx = np.searchsorted(vertno[0], this_vertices)\n elif slabel.hemi == 'rh':\n this_vertices = np.intersect1d(vertno[1], slabel.vertices)\n vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)\n else:\n raise ValueError('label %s has invalid hemi' % label.name)\n this_vertidx.append(vertidx)\n\n # convert it to an array\n this_vertidx = np.concatenate(this_vertidx)\n this_flip = None\n if len(this_vertidx) == 0:\n bad_labels.append(label.name)\n this_vertidx = None # to later check if label is empty\n elif mode not in ('mean', 'max'): # mode-dependent initialization\n # label_sign_flip uses two properties:\n #\n # - src[ii]['nn']\n # - src[ii]['vertno']\n #\n # So if we override vertno with the stc vertices, it will pick\n # the correct normals.\n with _temporary_vertices(src, stc.vertices):\n this_flip = label_sign_flip(label, src[:2])[:, None]\n\n label_vertidx.append(this_vertidx)\n label_flip.append(this_flip)\n\n if len(bad_labels):\n msg = ('source space does not contain any vertices for %d label%s:\\n%s'\n % (len(bad_labels), _pl(bad_labels), bad_labels))\n if not allow_empty:\n raise ValueError(msg)\n else:\n msg += '\\nAssigning all-zero time series.'\n if allow_empty == 'ignore':\n logger.info(msg)\n else:\n warn(msg)\n\n return label_vertidx, label_flip\n\n\ndef _vol_src_rr(src):\n return apply_trans(\n src[0]['src_mri_t'], np.array(\n [d.ravel(order='F')\n for d in np.meshgrid(\n *(np.arange(s) for s in src[0]['shape']),\n indexing='ij')],\n float).T)\n\n\ndef _volume_labels(src, labels, mri_resolution):\n # This will create Label objects that should do the right thing for our\n # given volumetric source space when used with extract_label_time_course\n from .label import Label\n assert src.kind == 'volume'\n extra = ' when using a volume source space'\n _import_nibabel('use volume atlas labels')\n _validate_type(labels, ('path-like', list, tuple), 'labels' + extra)\n if _check_path_like(labels):\n mri = labels\n infer_labels = True\n else:\n if len(labels) != 2:\n raise ValueError('labels, if list or tuple, must have length 2, '\n 'got %s' % (len(labels),))\n mri, labels = labels\n infer_labels = False\n _validate_type(mri, 
'path-like', 'labels[0]' + extra)\n logger.info('Reading atlas %s' % (mri,))\n vol_info = _get_mri_info_data(str(mri), data=True)\n atlas_data = vol_info['data']\n atlas_values = np.unique(atlas_data)\n if atlas_values.dtype.kind == 'f': # MGZ will be 'i'\n atlas_values = atlas_values[np.isfinite(atlas_values)]\n if not (atlas_values == np.round(atlas_values)).all():\n raise RuntimeError('Non-integer values present in atlas, cannot '\n 'labelize')\n atlas_values = np.round(atlas_values).astype(np.int64)\n if infer_labels:\n labels = {\n k: v for k, v in read_freesurfer_lut()[0].items()\n if v in atlas_values}\n labels = _check_volume_labels(labels, mri, name='labels[1]')\n assert isinstance(labels, dict)\n del atlas_values\n\n vox_mri_t = vol_info['vox_mri_t']\n want = src[0].get('vox_mri_t', None)\n if want is None:\n raise RuntimeError(\n 'Cannot use volumetric atlas if no mri was supplied during '\n 'source space creation')\n vox_mri_t, want = vox_mri_t['trans'], want['trans']\n if not np.allclose(vox_mri_t, want, atol=1e-6):\n raise RuntimeError(\n 'atlas vox_mri_t does not match that used to create the source '\n 'space')\n src_shape = tuple(src[0]['mri_' + k] for k in ('width', 'height', 'depth'))\n atlas_shape = atlas_data.shape\n if atlas_shape != src_shape:\n raise RuntimeError('atlas shape %s does not match source space MRI '\n 'shape %s' % (atlas_shape, src_shape))\n atlas_data = atlas_data.ravel(order='F')\n if mri_resolution:\n # Upsample then just index\n out_labels = list()\n nnz = 0\n interp = src[0]['interpolator']\n # should be guaranteed by size checks above and our src interp code\n assert interp.shape[0] == np.prod(src_shape)\n assert interp.shape == (atlas_data.size, len(src[0]['rr']))\n interp = interp[:, src[0]['vertno']]\n for k, v in labels.items():\n mask = atlas_data == v\n csr = interp[mask]\n out_labels.append(dict(csr=csr, name=k))\n nnz += csr.shape[0] > 0\n else:\n # Use nearest values\n vertno = src[0]['vertno']\n rr = _vol_src_rr(src)\n del src\n src_values = _get_atlas_values(vol_info, rr[vertno])\n vertices = [vertno[src_values == val] for val in labels.values()]\n out_labels = [Label(v, hemi='lh', name=val)\n for v, val in zip(vertices, labels.keys())]\n nnz = sum(len(v) != 0 for v in vertices)\n logger.info('%d/%d atlas regions had at least one vertex '\n 'in the source space' % (nnz, len(out_labels)))\n return out_labels\n\n\ndef _get_default_label_modes():\n return sorted(_label_funcs.keys()) + ['auto']\n\n\ndef _get_allowed_label_modes(stc):\n if isinstance(stc, (_BaseVolSourceEstimate,\n _BaseVectorSourceEstimate)):\n return ('mean', 'max', 'auto')\n else:\n return _get_default_label_modes()\n\n\ndef _gen_extract_label_time_course(stcs, labels, src, *, mode='mean',\n allow_empty=False,\n mri_resolution=True, verbose=None):\n # loop through source estimates and extract time series\n if src is None and mode in ['mean', 'max']:\n kind = 'surface'\n else:\n _validate_type(src, SourceSpaces)\n kind = src.kind\n _check_option('mode', mode, _get_default_label_modes())\n\n if kind in ('surface', 'mixed'):\n if not isinstance(labels, list):\n labels = [labels]\n use_sparse = False\n else:\n labels = _volume_labels(src, labels, mri_resolution)\n use_sparse = bool(mri_resolution)\n n_mode = len(labels) # how many processed with the given mode\n n_mean = len(src[2:]) if kind == 'mixed' else 0\n n_labels = n_mode + n_mean\n vertno = func = None\n for si, stc in enumerate(stcs):\n _validate_type(stc, _BaseSourceEstimate, 'stcs[%d]' % (si,),\n 'source 
estimate')\n _check_option(\n 'mode', mode, _get_allowed_label_modes(stc),\n 'when using a vector and/or volume source estimate')\n if isinstance(stc, (_BaseVolSourceEstimate,\n _BaseVectorSourceEstimate)):\n mode = 'mean' if mode == 'auto' else mode\n else:\n mode = 'mean_flip' if mode == 'auto' else mode\n if vertno is None:\n vertno = copy.deepcopy(stc.vertices) # avoid keeping a ref\n nvert = np.array([len(v) for v in vertno])\n label_vertidx, src_flip = _prepare_label_extraction(\n stc, labels, src, mode, allow_empty, use_sparse)\n func = _label_funcs[mode]\n # make sure the stc is compatible with the source space\n if len(vertno) != len(stc.vertices):\n raise ValueError('stc not compatible with source space')\n for vn, svn in zip(vertno, stc.vertices):\n if len(vn) != len(svn):\n raise ValueError('stc not compatible with source space. '\n 'stc has %s time series but there are %s '\n 'vertices in source space. Ensure you used '\n 'src from the forward or inverse operator, '\n 'as forward computation can exclude vertices.'\n % (len(svn), len(vn)))\n if not np.array_equal(svn, vn):\n raise ValueError('stc not compatible with source space')\n\n logger.info('Extracting time courses for %d labels (mode: %s)'\n % (n_labels, mode))\n\n # do the extraction\n label_tc = np.zeros((n_labels,) + stc.data.shape[1:],\n dtype=stc.data.dtype)\n for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):\n if vertidx is not None:\n if isinstance(vertidx, sparse.csr_matrix):\n assert mri_resolution\n assert vertidx.shape[1] == stc.data.shape[0]\n this_data = np.reshape(stc.data, (stc.data.shape[0], -1))\n this_data = vertidx @ this_data\n this_data.shape = \\\n (this_data.shape[0],) + stc.data.shape[1:]\n else:\n this_data = stc.data[vertidx]\n label_tc[i] = func(flip, this_data)\n\n # extract label time series for the vol src space (only mean supported)\n offset = nvert[:-n_mean].sum() # effectively :2 or :0\n for i, nv in enumerate(nvert[2:]):\n if nv != 0:\n v2 = offset + nv\n label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0)\n offset = v2\n\n # this is a generator!\n yield label_tc\n\n\n@verbose\ndef extract_label_time_course(stcs, labels, src, mode='auto',\n allow_empty=False, return_generator=False,\n *, mri_resolution=True, verbose=None):\n \"\"\"Extract label time course for lists of labels and source estimates.\n\n This function will extract one time course for each label and source\n estimate. 
The way the time courses are extracted depends on the mode\n parameter (see Notes).\n\n Parameters\n ----------\n stcs : SourceEstimate | list (or generator) of SourceEstimate\n The source estimates from which to extract the time course.\n %(eltc_labels)s\n %(eltc_src)s\n %(eltc_mode)s\n %(eltc_allow_empty)s\n return_generator : bool\n If True, a generator instead of a list is returned.\n %(eltc_mri_resolution)s\n %(verbose)s\n\n Returns\n -------\n %(eltc_returns)s\n\n Notes\n -----\n %(eltc_mode_notes)s\n\n If encountering a ``ValueError`` due to mismatch between number of\n source points in the subject source space and computed ``stc`` object set\n ``src`` argument to ``fwd['src']`` or ``inv['src']`` to ensure the source\n space is the one actually used by the inverse to compute the source\n time courses.\n \"\"\"\n # convert inputs to lists\n if not isinstance(stcs, (list, tuple, GeneratorType)):\n stcs = [stcs]\n return_several = False\n return_generator = False\n else:\n return_several = True\n\n label_tc = _gen_extract_label_time_course(\n stcs, labels, src, mode=mode, allow_empty=allow_empty,\n mri_resolution=mri_resolution)\n\n if not return_generator:\n # do the extraction and return a list\n label_tc = list(label_tc)\n\n if not return_several:\n # input was a single SourceEstimate, return single array\n label_tc = label_tc[0]\n\n return label_tc\n\n\n@verbose\ndef stc_near_sensors(evoked, trans, subject, distance=0.01, mode='sum',\n project=True, subjects_dir=None, src=None, verbose=None):\n \"\"\"Create a STC from ECoG, sEEG and DBS sensor data.\n\n Parameters\n ----------\n evoked : instance of Evoked\n The evoked data. Must contain ECoG, sEEG or DBS channels.\n %(trans)s\n subject : str\n The subject name.\n distance : float\n Distance (m) defining the activation \"ball\" of the sensor.\n mode : str\n Can be \"sum\" to do a linear sum of weights, \"single\" to\n use only the weight of the nearest sensor, or \"nearest\" to\n use the value of the nearest sensor. See Notes.\n project : bool\n If True, project the electrodes to the nearest ``'pial'`` surface\n vertex before computing distances. Only used when doing a\n surface projection.\n %(subjects_dir)s\n src : instance of SourceSpaces\n The source space.\n\n .. warning:: If a surface source space is used, make sure that\n ``surf='pial'`` was used during construction.\n %(verbose)s\n\n Returns\n -------\n stc : instance of SourceEstimate\n The surface source estimate. If src is None, a surface source\n estimate will be produced, and the number of vertices will equal\n the number of pial-surface vertices that were close enough to\n the sensors to take on a non-zero value. If src is not None,\n a surface, volume, or mixed source estimate will be produced\n (depending on the kind of source space passed) and the\n vertices will match those of src (i.e., there may be\n many all-zero values in stc.data).\n\n Notes\n -----\n For surface projections, this function projects the ECoG sensors to\n the pial surface (if ``project``), then the activation at each pial\n surface vertex is given by the mode:\n\n - ``'sum'``\n Activation is the sum across each sensor weighted by the fractional\n ``distance`` from each sensor. 
A sensor with zero distance gets weight\n 1 and a sensor at ``distance`` meters away (or larger) gets weight 0.\n If ``distance`` is less than the distance between any two electrodes,\n this will be the same as ``'nearest'``.\n - ``'single'``\n Same as ``'sum'`` except that only the nearest electrode is used,\n rather than summing across electrodes within the ``distance`` radius.\n Same as ``'nearest'`` for vertices with distance zero to the projected\n sensor.\n - ``'nearest'``\n The value is given by the value of the nearest sensor, up to a\n ``distance`` (beyond which it is zero).\n\n If creating a Volume STC, ``src`` must be passed in, and this\n function will project sEEG and DBS sensors to nearby surrounding vertices.\n Then the activation at each volume vertex is given by the mode\n in the same way as ECoG surface projections.\n\n .. versionadded:: 0.22\n \"\"\"\n from scipy.spatial.distance import cdist, pdist\n from .evoked import Evoked\n _validate_type(evoked, Evoked, 'evoked')\n _validate_type(mode, str, 'mode')\n _validate_type(src, (None, SourceSpaces), 'src')\n _check_option('mode', mode, ('sum', 'single', 'nearest'))\n\n # create a copy of Evoked using ecog, seeg and dbs\n evoked = evoked.copy().pick_types(ecog=True, seeg=True, dbs=True)\n\n # get channel positions that will be used to pinpoint where\n # in the Source space we will use the evoked data\n pos = evoked._get_channel_positions()\n\n # remove nan channels\n nan_inds = np.where(np.isnan(pos).any(axis=1))[0]\n nan_chs = [evoked.ch_names[idx] for idx in nan_inds]\n evoked.drop_channels(nan_chs)\n pos = [pos[idx] for idx in range(len(pos)) if idx not in nan_inds]\n\n # coord_frame transformation from native mne \"head\" to MRI coord_frame\n trans, _ = _get_trans(trans, 'head', 'mri', allow_none=True)\n\n # convert head positions -> coord_frame MRI\n pos = apply_trans(trans, pos)\n\n subject = _check_subject(None, subject, False)\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n if src is None: # fake a full surface one\n rrs = [read_surface(op.join(subjects_dir, subject,\n 'surf', f'{hemi}.pial'))[0]\n for hemi in ('lh', 'rh')]\n src = SourceSpaces([\n dict(rr=rr / 1000., vertno=np.arange(len(rr)), type='surf',\n coord_frame=FIFF.FIFFV_COORD_MRI)\n for rr in rrs])\n del rrs\n keep_all = False\n else:\n keep_all = True\n # ensure it's a usable one\n klass = dict(\n surface=SourceEstimate,\n volume=VolSourceEstimate,\n mixed=MixedSourceEstimate,\n )\n _check_option('src.kind', src.kind, sorted(klass.keys()))\n klass = klass[src.kind]\n rrs = np.concatenate([s['rr'][s['vertno']] for s in src])\n if src[0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD:\n rrs = apply_trans(trans, rrs)\n # projection will only occur with surfaces\n logger.info(\n f'Projecting data from {len(pos)} sensor{_pl(pos)} onto {len(rrs)} '\n f'{src.kind} vertices: {mode} mode')\n if project and src.kind == 'surface':\n logger.info(' Projecting electrodes onto surface')\n pos = _project_onto_surface(pos, dict(rr=rrs), project_rrs=True,\n method='nearest')[2]\n\n min_dist = pdist(pos).min() * 1000\n logger.info(\n f' Minimum {\"projected \" if project else \"\"}intra-sensor distance: '\n f'{min_dist:0.1f} mm')\n\n # compute pairwise distance between source space points and sensors\n dists = cdist(rrs, pos)\n assert dists.shape == (len(rrs), len(pos))\n\n # only consider vertices within our \"epsilon-ball\"\n # characterized by distance kwarg\n vertices = np.where((dists <= distance).any(-1))[0]\n logger.info(f' {len(vertices)} / 
{len(rrs)} non-zero vertices')\n w = np.maximum(1. - dists[vertices] / distance, 0)\n # now we triage based on mode\n if mode in ('single', 'nearest'):\n range_ = np.arange(w.shape[0])\n idx = np.argmax(w, axis=1)\n vals = w[range_, idx] if mode == 'single' else 1.\n w.fill(0)\n w[range_, idx] = vals\n missing = np.where(~np.any(w, axis=0))[0]\n if len(missing):\n warn(f'Channel{_pl(missing)} missing in STC: '\n f'{\", \".join(evoked.ch_names[mi] for mi in missing)}')\n\n nz_data = w @ evoked.data\n if not keep_all:\n assert src.kind == 'surface'\n data = nz_data\n offset = len(src[0]['vertno'])\n vertices = [vertices[vertices < offset],\n vertices[vertices >= offset] - offset]\n else:\n data = np.zeros(\n (sum(len(s['vertno']) for s in src), len(evoked.times)),\n dtype=nz_data.dtype)\n data[vertices] = nz_data\n vertices = [s['vertno'].copy() for s in src]\n\n return klass(data, vertices, evoked.times[0], 1. / evoked.info['sfreq'],\n subject=subject, verbose=verbose)\n" ]
[ [ "numpy.diag", "numpy.dot", "scipy.linalg.svd", "numpy.asarray", "sklearn.feature_extraction.grid_to_graph", "numpy.in1d", "scipy.sparse.block_diag", "numpy.vstack", "numpy.concatenate", "numpy.round", "numpy.max", "numpy.mean", "numpy.any", "numpy.searchsorted", "numpy.iscomplexobj", "numpy.where", "scipy.sparse.coo_matrix", "numpy.linalg.svd", "numpy.allclose", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.matmul", "numpy.frombuffer", "numpy.intersect1d", "numpy.argmax", "numpy.diff", "numpy.less_equal", "numpy.insert", "scipy.linalg.norm", "numpy.isrealobj", "numpy.repeat", "numpy.zeros", "numpy.isnan", "scipy.spatial.distance.cdist", "scipy.sparse.csr_matrix", "numpy.atleast_2d", "numpy.log10", "scipy.sparse.hstack", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.fromfile", "numpy.maximum", "numpy.abs", "numpy.array_equal", "numpy.isfinite", "scipy.sparse.eye", "numpy.linalg.norm", "numpy.tile", "numpy.ones", "numpy.setdiff1d", "numpy.sign", "scipy.spatial.distance.pdist", "numpy.prod", "numpy.left_shift", "numpy.empty" ] ]
TMmichi/stable-baselines
[ "b55d2de60c90925bdf109e372f86789ad21b5a36" ]
[ "stable_baselines/common/base_class.py" ]
[ "import os\nimport glob\nimport json\nimport zipfile\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom collections import OrderedDict, deque\nfrom typing import Union, List, Callable, Optional\n\nimport gym\nimport cloudpickle\nimport numpy as np\nimport tensorflow as tf\n\nfrom stable_baselines.common.misc_util import set_global_seeds\nfrom stable_baselines.common.math_util import unscale_action\nfrom stable_baselines.common.save_util import data_to_json, json_to_data, params_to_bytes, bytes_to_params\nfrom stable_baselines.common.policies import get_policy_from_name, ActorCriticPolicy\nfrom stable_baselines.common.runners import AbstractEnvRunner\nfrom stable_baselines.common.vec_env import (VecEnvWrapper, VecEnv, DummyVecEnv,\n VecNormalize, unwrap_vec_normalize)\nfrom stable_baselines.common.callbacks import BaseCallback, CallbackList, ConvertCallback\nfrom stable_baselines import logger\n\n\nclass BaseRLModel(ABC):\n \"\"\"\n The base RL model\n\n :param policy: (BasePolicy) Policy object\n :param env: (Gym environment) The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param requires_vec_env: (bool) Does this model require a vectorized environment\n :param policy_base: (BasePolicy) the base policy used by this method\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n :param composite_primitive_name: (str) Name of the composite primitive. 
If None, will have no effect.\n \"\"\"\n\n def __init__(self, policy, env, verbose=0, *, requires_vec_env, policy_base,\n policy_kwargs=None, seed=None, n_cpu_tf_sess=None, composite_primitive_name=None):\n if isinstance(policy, str) and policy_base is not None:\n self.policy = get_policy_from_name(policy_base, policy)\n else:\n self.policy = policy\n self.env = env\n self.verbose = verbose\n self._requires_vec_env = requires_vec_env\n self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs\n self.tails = []\n self.observation_space = None\n self.action_space = None\n self.n_envs = None\n self._vectorize_action = False\n self.num_timesteps = 0\n self.graph = None\n self.sess = None\n self.params = None\n self.seed = seed\n self._param_load_ops = None\n self.n_cpu_tf_sess = n_cpu_tf_sess\n self.episode_reward = None\n self.ep_info_buf = None\n self.composite_primitive_name = composite_primitive_name\n self.primitives = OrderedDict()\n self.top_hierarchy_level = 0\n\n if env is not None:\n print(\"env is not none\")\n if isinstance(env, str):\n if self.verbose >= 1:\n print(\"Creating environment from the given name, wrapped in a DummyVecEnv.\")\n self.env = env = DummyVecEnv([lambda: gym.make(env)])\n\n self.observation_space = env.observation_space\n self.action_space = env.action_space\n if requires_vec_env:\n if isinstance(env, VecEnv):\n self.n_envs = env.num_envs\n else:\n # The model requires a VecEnv\n # wrap it in a DummyVecEnv to avoid error\n self.env = DummyVecEnv([lambda: env])\n if self.verbose >= 1:\n print(\"Wrapping the env in a DummyVecEnv.\")\n self.n_envs = 1\n else:\n if isinstance(env, VecEnv):\n if env.num_envs == 1:\n self.env = _UnvecWrapper(env)\n self._vectorize_action = True\n else:\n raise ValueError(\"Error: the model requires a non vectorized environment \\\n or a single vectorized environment.\")\n self.n_envs = 1\n\n # Get VecNormalize object if it exists\n self._vec_normalize_env = unwrap_vec_normalize(self.env)\n\n def get_env(self):\n \"\"\"\n returns the current environment (can be None if not defined)\n\n :return: (Gym Environment) The current environment\n \"\"\"\n return self.env\n\n def get_vec_normalize_env(self) -> Optional[VecNormalize]:\n \"\"\"\n Return the ``VecNormalize`` wrapper of the training env\n if it exists.\n\n :return: Optional[VecNormalize] The ``VecNormalize`` env.\n \"\"\"\n return self._vec_normalize_env\n\n def set_env(self, env):\n \"\"\"\n Checks the validity of the environment, and if it is coherent, set it as the current environment.\n\n :param env: (Gym Environment) The environment for learning a policy\n \"\"\"\n if env is None and self.env is None:\n if self.verbose >= 1:\n print(\"Loading a model without an environment, \"\n \"this model cannot be trained until it has a valid environment.\")\n return\n elif env is None:\n raise ValueError(\"Error: trying to replace the current environment with None\")\n\n # sanity checking the environment\n # NOTE(tmmichi): sanity check not required for now. \n # Loaded obs_space can differ from the env.obs_space.\n # Initialize obs space of the RL model class with the environment obs space\n self.observation_space = env.observation_space\n self.action_space = env.action_space\n # assert self.observation_space == env.observation_space, \\\n # \"Error: the environment passed must have at least the same observation space \\\n # as the model was trained on. self.obs = {0}, env.obs = {1}\". 
\\\n # format(self.observation_space, env.observation_space)\n # assert self.action_space == env.action_space, \\\n # \"Error: the environment passed must have at least the same action space \\\n # as the model was trained on. self.act = {0}, env.act = {1}\". \\\n # format(self.action_space, env.action_space)\n\n if self._requires_vec_env:\n assert isinstance(env, VecEnv), \\\n \"Error: the environment passed is not a vectorized environment, however {} requires it\".format(\n self.__class__.__name__)\n assert not self.policy.recurrent or self.n_envs == env.num_envs, \\\n \"Error: the environment passed must have the same number of environments as the model was trained on.\" \\\n \"This is due to the Lstm policy not being capable of changing the number of environments.\"\n self.n_envs = env.num_envs\n else:\n # for models that don't want a vectorized environment, check if they make sense and adapt them.\n # Otherwise tell the user about this issue\n if isinstance(env, VecEnv):\n if env.num_envs == 1:\n env = _UnvecWrapper(env)\n self._vectorize_action = True\n else:\n raise ValueError(\"Error: the model requires a non vectorized environment \\\n or a single vectorized environment.\")\n else:\n self._vectorize_action = False\n\n self.n_envs = 1\n\n self.env = env\n self._vec_normalize_env = unwrap_vec_normalize(env)\n\n # Invalidated by environment change.\n self.episode_reward = None\n self.ep_info_buf = None\n\n def _init_num_timesteps(self, reset_num_timesteps=True):\n \"\"\"\n Initialize and reset num_timesteps (total timesteps since beginning of training)\n if needed. Mainly used for logging and plotting (tensorboard).\n\n :param reset_num_timesteps: (bool) Set it to false when continuing training\n to not create new plotting curves in tensorboard.\n :return: (bool) Whether a new tensorboard log needs to be created\n \"\"\"\n if reset_num_timesteps:\n self.num_timesteps = 0\n\n new_tb_log = self.num_timesteps == 0\n return new_tb_log\n\n @abstractmethod\n def setup_model(self):\n \"\"\"\n Create all the functions and tensorflow graphs necessary to train the model\n \"\"\"\n pass\n\n def _init_callback(self,\n callback: Union[None, Callable, List[BaseCallback], BaseCallback]\n ) -> BaseCallback:\n \"\"\"\n :param callback: (Union[None, Callable, List[BaseCallback], BaseCallback])\n :return: (BaseCallback)\n \"\"\"\n # Convert a list of callbacks into a callback\n if isinstance(callback, list):\n callback = CallbackList(callback)\n # Convert functional callback to object\n if not isinstance(callback, BaseCallback):\n callback = ConvertCallback(callback)\n\n callback.init_callback(self)\n return callback\n\n def set_random_seed(self, seed: Optional[int]) -> None:\n \"\"\"\n :param seed: (Optional[int]) Seed for the pseudo-random generators. 
If None,\n do not change the seeds.\n \"\"\"\n # Ignore if the seed is None\n if seed is None:\n return\n # Seed python, numpy and tf random generator\n set_global_seeds(seed)\n if self.env is not None:\n self.env.seed(seed)\n # Seed the action space\n # useful when selecting random actions\n self.env.action_space.seed(seed)\n self.action_space.seed(seed)\n\n def _setup_learn(self):\n \"\"\"\n Check the environment.\n \"\"\"\n if self.env is None:\n raise ValueError(\"\\033[91m[ERROR]: cannot train the model without a valid environment, \\\n please set an environment with set_env(self, env) method.\\033[0m\")\n if self.episode_reward is None:\n self.episode_reward = np.zeros((self.n_envs,))\n if self.ep_info_buf is None:\n self.ep_info_buf = deque(maxlen=100)\n\n def construct_primitive_info(self, name, freeze, level, \n obs_range: Union[dict, int], obs_index, act_range: Union[dict, int], act_index, act_scale, \n obs_relativity, layer_structure, subgoal=None, loaded_policy=None, load_value=False):\n '''\n Returns info of the primitive as a dictionary\n\n :param name: (str) name of the primitive\n :param freeze: (bool) primitive to be frozen at training time\n :param level: (int) hierarchical level of the primitive\n :param obs_range: (dict or int) a dictionary containing min/max bound of the observation range.\n If int, then range fixed to 0\n :param obs_index: ([int, ...]) a list of indices of the observation for the primitive\n :param act_range: (dict or int) a dictionary containing min/max bound of the action range.\n If int, then all dimensions of the range fixed to [0,1]\n :param act_index: ([int, ...]) a list of indices of the action for the primitive\n :param act_scale: (int) action scale\n :param obs_relativity: (dict) a dictionary of observation index-relativity\n :param layer_structure: (dict) layer structure of the primitive policy/value\n :param subgoal: (dict) a dictionary containing [primitive name, subgoal->obs index]\n :param loaded_policy: ((dict, dict)) tuple of data and parameters for pretrained policy.zip\n :param load_value: (bool) load separate value network for the primitive\n :return: (dict: {'obs':tuple, 'act':tuple, 'layer':dict}) primitive information\n '''\n\n # Newly appointed primitive from scratch (base policy)\n if isinstance(loaded_policy, type(None)):\n assert not freeze, \\\n '\\n\\t\\033[91m[ERROR]: Newly appointed primitive at training time cannot be frozen\\033[0m'\n assert name != None, \\\n '\\n\\t\\033[91m[ERROR]: Newly appointed primitive should have its name designated\\033[0m'\n # Meta-policies are always initialized from scratch\n if name == 'weight':\n self.top_hierarchy_level = level\n name = self.composite_primitive_name + \"/weight\"\n layer_name = '/'.join(['train','level'+str(level)+'_'+name])\n primitive_name = '/'.join(['level'+str(level)+'_'+name])\n self.tails.append(primitive_name)\n\n # Observation space initialization\n obs_dimension = len(obs_index)\n if isinstance(obs_range, dict):\n assert 'min' in obs_range.keys() and 'max' in obs_range.keys(), \\\n '\\n\\t\\033[91m[ERROR]: No keys named \"min\" or \"max\" within the obs_range.\\033[0m'\n assert len(obs_range['min']) == len(obs_range['max']), \\\n '\\n\\t\\033[91m[ERROR]: Length of minimum obs_range and maximum obs_range differs.\\033[0m'\n assert len(obs_range['min']) == obs_dimension, \\\n '\\n\\t\\033[91m[ERROR]: length of obs_range differs with the length of obs_index.\\033[0m'\n obs_range_max = np.array(obs_range['max'])\n obs_range_min = np.array(obs_range['min'])\n elif 
isinstance(obs_range, int):\n obs_range_max = np.array([0]*obs_dimension)\n obs_range_min = np.array([0]*obs_dimension)\n else:\n raise TypeError(\"\\n\\t\\033[91m[ERROR]: \\\n obs_range wrong type - Should be a dict or an int. Received {0}\\033[0m\".format(type(obs_range)))\n obs_index.sort()\n obs_space = gym.spaces.Box(obs_range_min, obs_range_max, dtype=np.float32)\n obs = (obs_space, obs_index)\n\n # Action space initialization\n act_dimension = len(act_index)\n if isinstance(act_range, dict):\n assert 'min' in act_range.keys() and 'max' in act_range.keys(), \\\n '\\n\\t\\033[91m[ERROR]: No keys named \"min\" or \"max\" within the act_range.\\033[0m'\n assert len(act_range['min']) == len(act_range['max']), \\\n '\\n\\t\\033[91m[ERROR]: Length of the minimum act_range and the maximum act_range differs.\\033[0m'\n assert len(act_range['min']) == act_dimension, \\\n '\\n\\t\\033[91m[ERROR]: Length of the act_range differs with the length of act_index.\\033[0m'\n act_range_max = np.array(act_range['max'])\n act_range_min = np.array(act_range['min'])\n elif isinstance(act_range, int):\n act_range_max = np.array([1]*act_dimension)\n act_range_min = np.array([0]*act_dimension)\n else:\n raise TypeError(\"\\n\\t\\033[91m[ERROR]: \\\n act_range wrong type - Should be a dict or an int. Received {0}\\033[0m\".format(type(act_range)))\n act_index.sort()\n act_space = gym.spaces.Box(act_range_min, act_range_max, dtype=np.float32)\n act = (act_space, act_index)\n\n policy_layer_structure = layer_structure.get('policy', None)\n value_layer_structure = layer_structure.get('value', None)\n main_tail = True # The primitive is the main tail\n tails = None # The primitive does not have any tail since it's a base policy\n load_value = False # We do not have any value networks to be loaded, so set to False\n \n # A pretrained primitive or a whole HPC module is loaded to:\n # 1. be run at the test time\n # 2. continue training from the parameters where it left off\n # 3. form the HPC module\n # determined by `name`\n elif isinstance(loaded_policy, tuple):\n cnt = False\n data_dict, param_dict = loaded_policy # Retrieve data dictionary from the loaded policy\n submodule_primitive = data_dict.get('primitives',OrderedDict()) # Get primitive dictionary of the loaded policy\n\n # 1. The HPC module is loaded to be run at the test time\n if name is None:\n primitive_name = 'loaded'\n self.composite_primitive_name = data_dict.get('composite_primitive_name', None)\n self.top_hierarchy_level = level\n layer_name = 'loaded'\n self.tails = tails = data_dict['tails']\n main_tail = True # We're at the main tail\n # 2. The HPC module is loaded to be trained continuously from the point it left off\n elif name == 'continue':\n cnt = True\n self.composite_primitive_name = data_dict.get('composite_primitive_name', None)\n self.top_hierarchy_level = level\n layer_name = 'freeze/loaded'\n self.tails = tails = data_dict['tails']\n main_tail = True\n for tail in tails:\n submodule_primitive[tail]['main_tail'] = True\n # 3. The primitive is loaded to form the HPC module\n else:\n loaded_name = data_dict.get('composite_primitive_name', None)\n if name != loaded_name and loaded_name is not None:\n print(\"\\n\\t\\033[93m[WARNING]: \\\n Name of the loaded policy ({0}) is different from the received name ({1}). 
\\\n {0} will be used.\\033[0m\".format(loaded_name,name))\n name = loaded_name\n layer_name_list = ['freeze' if freeze else 'train','loaded','level'+str(level)+'_'+name]\n primitive_name_list = ['level'+str(level)+'_'+name]\n if level == 1:\n layer_name_list.append('level0')\n primitive_name_list.append('level0')\n tails = None\n else:\n tails = data_dict.get('tails', [])\n tails = None if len(tails) == 0 else tails\n layer_name = '/'.join(layer_name_list)\n primitive_name = '/'.join(primitive_name_list)\n self.tails.append(primitive_name)\n main_tail = True\n self.primitives = {**self.primitives, **submodule_primitive}\n\n obs_space = data_dict['observation_space']\n act_space = data_dict['action_space']\n obs_index.sort()\n act_index.sort()\n\n obs = (obs_space, obs_index)\n act = (act_space, act_index)\n\n if 'pretrained_param' not in self.primitives.keys():\n self.primitives['pretrained_param'] = [[],{}]\n updated_layer_name, updated_param_dict = self.loaded_policy_name_update(layer_name, param_dict, load_value, cnt)\n self.primitives['pretrained_param'][0] += updated_layer_name\n self.primitives['pretrained_param'][1] = {**self.primitives['pretrained_param'][1], **updated_param_dict}\n policy_layer_structure, value_layer_structure = self.get_layer_structure((obs, act), param_dict, load_value)\n else:\n raise TypeError(\"\\n\\t\\033[91m[ERROR]: loaded_policy wrong type - Should be None or a tuple. \\\n Received {0}\\033[0m\".format(type(loaded_policy)))\n \n if not cnt:\n self.primitives[primitive_name] = {\n 'obs': obs, 'act': act, 'act_scale': act_scale, 'obs_relativity': obs_relativity,\n 'subgoal': subgoal, 'layer': {'policy': policy_layer_structure, 'value': value_layer_structure},\n 'layer_name': layer_name, 'tails':tails, 'main_tail':main_tail, 'load_value': load_value}\n \n def loaded_policy_name_update(self, layer_name, loaded_policy_dict, load_value, cnt):\n '''\n Concatenate name of each layers with name of the primitive\n\n :param layer_name: (str) name of the primitive layer\n :param loaded_policy_dict: (dict) Dictionary of parameters of layers by name\n :param load_value: (bool) Use loaded separate value network\n :return: (list, dict) List consisting names of layers with specified primitive\n Dictionary of parameters by updated names\n '''\n layer_name_list = []\n layer_param_dict = {}\n comp_name = 'level'+str(self.top_hierarchy_level)+\"_\"+self.composite_primitive_name\n for name, value in loaded_policy_dict.items():\n add_value = False\n name_elem = name.split(\"/\")\n assert not 'LayerNorm' in name_elem, \\\n \"\\n\\t\\033[91m[ERROR]: LayerNormalized policy is not supported for now. 
\\\n Try loading primitives with unnormalized layers\\033[0m\"\n\n if 'pi' in name_elem:\n insert_index = 2\n add_value = True\n elif 'values_fn' in name_elem and load_value: \n insert_index = 3\n add_value = True\n\n if add_value:\n if cnt and comp_name in name_elem:\n name_elem.insert(insert_index, 'train')\n else:\n name_elem.insert(insert_index, layer_name)\n updated_name = '/'.join(name_elem)\n layer_name_list.append(updated_name)\n layer_param_dict[updated_name] = value\n\n return layer_name_list, layer_param_dict\n\n @staticmethod\n def get_layer_structure(argument_tuple, loaded_policy_dict, load_value):\n '''\n Return layer structure of the policy/value from parameter dictionary\n\n :param argument_tuple: ((tuple, tuple)) Tuple containing info of observation and action\n :param loaded_policy_dict: (dict) Dictionary of parameters of layers by name\n :param load_value: (bool) Use loaded separate value network\n :return: (list, list) Layer structure of the policy/value\n '''\n # obs, _ = argument_tuple\n policy_layer_structure = []\n value_layer_structure = []\n for name, value in loaded_policy_dict.items():\n if name.find(\"pi/fc\") > -1:\n if name.find(\"fc0/kernel\") > -1:\n pass\n if name.find(\"bias\") > -1:\n policy_layer_structure.append(value.shape[0])\n if load_value:\n if name.find('model/values_fn/vf/fc') > -1:\n if name.find('bias') > -1:\n value_layer_structure.append(value.shape[0])\n \n return policy_layer_structure, value_layer_structure\n\n @abstractmethod\n def get_parameter_list(self):\n \"\"\"\n Get tensorflow Variables of model's parameters\n\n This includes all variables necessary for continuing training (saving / loading).\n\n :return: (list) List of tensorflow Variables\n \"\"\"\n pass\n\n def get_parameters(self, hierarchical=False):\n \"\"\"\n Get current model parameters as dictionary of variable name -> ndarray.\n\n :return: (OrderedDict) Dictionary of variable name -> ndarray of model's parameters.\n \"\"\"\n parameters = self.get_parameter_list() #self.params\n parameter_values = self.sess.run(parameters)\n if not hierarchical:\n return_dictionary = OrderedDict((param.name, value) for param, value in zip(parameters, parameter_values))\n else:\n names = []\n for param in parameters:\n name_list = param.name.split('/')\n if 'train' in name_list:\n name_list.remove('train')\n try:\n name_list.remove('loaded')\n except Exception:\n pass\n name = \"/\".join(name_list)\n elif 'freeze' in name_list:\n name_list.remove('freeze')\n name_list.remove('loaded')\n name = \"/\".join(name_list)\n else:\n name = param.name\n names.append(name)\n return_dictionary = OrderedDict((name, value) for name, value in zip(names, parameter_values))\n\n return return_dictionary\n\n def _setup_load_operations(self):\n \"\"\"\n Create tensorflow operations for loading model parameters\n \"\"\"\n # Assume tensorflow graphs are static -> check\n # that we only call this function once\n if self._param_load_ops is not None:\n raise RuntimeError(\"Parameter load operations have already been created\")\n # For each loadable parameter, create appropiate\n # placeholder and an assign op, and store them to\n # self.load_param_ops as dict of variable.name -> (placeholder, assign)\n loadable_parameters = self.get_parameter_list()\n\n # Use OrderedDict to store order for backwards compatibility with\n # list-based params\n self._param_load_ops = OrderedDict()\n with self.graph.as_default():\n for param in loadable_parameters:\n placeholder = tf.placeholder(dtype=param.dtype, shape=param.shape)\n 
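# Illustrative sketch, not original code: each stored (placeholder, assign) pair\n # lets a later load step feed a saved array through the placeholder and run\n # the assign op, e.g. self.sess.run(assign_op, feed_dict={placeholder: saved_value})\n 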
# param.name is unique (tensorflow variables have unique names)\n self._param_load_ops[param.name] = (placeholder, param.assign(placeholder))\n\n @abstractmethod\n def _get_pretrain_placeholders(self):\n \"\"\"\n Return the placeholders needed for the pretraining:\n - obs_ph: observation placeholder\n - actions_ph will be populated with an action from the environment\n (from the expert dataset)\n - deterministic_actions_ph: e.g., in the case of a Gaussian policy,\n the mean.\n\n :return: ((tf.placeholder)) (obs_ph, actions_ph, deterministic_actions_ph)\n \"\"\"\n pass\n\n def pretrain(self, dataset, n_epochs=10, learning_rate=1e-4,\n adam_epsilon=1e-8, val_interval=None):\n \"\"\"\n Pretrain a model using behavior cloning:\n supervised learning given an expert dataset.\n\n NOTE: only Box and Discrete spaces are supported for now.\n\n :param dataset: (ExpertDataset) Dataset manager\n :param n_epochs: (int) Number of iterations on the training set\n :param learning_rate: (float) Learning rate\n :param adam_epsilon: (float) the epsilon value for the adam optimizer\n :param val_interval: (int) Report training and validation losses every n epochs.\n By default, every 10th of the maximum number of epochs.\n :return: (BaseRLModel) the pretrained model\n \"\"\"\n continuous_actions = isinstance(self.action_space, gym.spaces.Box)\n discrete_actions = isinstance(self.action_space, gym.spaces.Discrete)\n\n assert discrete_actions or continuous_actions, 'Only Discrete and Box action spaces are supported'\n\n # Validate the model every 10% of the total number of iterations\n if val_interval is None:\n # Prevent modulo by zero\n if n_epochs < 10:\n val_interval = 1\n else:\n val_interval = int(n_epochs / 10)\n\n with self.graph.as_default():\n with tf.variable_scope('pretrain'):\n if continuous_actions:\n obs_ph, actions_ph, deterministic_actions_ph = self._get_pretrain_placeholders()\n loss = tf.reduce_mean(tf.square(actions_ph - deterministic_actions_ph))\n else:\n obs_ph, actions_ph, actions_logits_ph = self._get_pretrain_placeholders()\n # actions_ph has a shape of (n_batch,), we reshape it to (n_batch, 1)\n # so no additional changes are needed in the dataloader\n actions_ph = tf.expand_dims(actions_ph, axis=1)\n one_hot_actions = tf.one_hot(actions_ph, self.action_space.n)\n loss = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=actions_logits_ph,\n labels=tf.stop_gradient(one_hot_actions)\n )\n loss = tf.reduce_mean(loss)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=adam_epsilon)\n optim_op = optimizer.minimize(loss, var_list=self.params)\n\n self.sess.run(tf.global_variables_initializer())\n\n if self.verbose > 0:\n print(\"Pretraining with Behavior Cloning...\")\n\n for epoch_idx in range(int(n_epochs)):\n train_loss = 0.0\n # Full pass on the training set\n for _ in range(len(dataset.train_loader)):\n expert_obs, expert_actions = dataset.get_next_batch('train')\n feed_dict = {\n obs_ph: expert_obs,\n actions_ph: expert_actions,\n }\n train_loss_, _ = self.sess.run([loss, optim_op], feed_dict)\n train_loss += train_loss_\n\n train_loss /= len(dataset.train_loader)\n\n if self.verbose > 0 and (epoch_idx + 1) % val_interval == 0:\n val_loss = 0.0\n # Full pass on the validation set\n for _ in range(len(dataset.val_loader)):\n expert_obs, expert_actions = dataset.get_next_batch('val')\n val_loss_, = self.sess.run([loss], {obs_ph: expert_obs,\n actions_ph: expert_actions})\n val_loss += val_loss_\n\n val_loss /= len(dataset.val_loader)\n if self.verbose > 0:\n 
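# Illustrative report (loss values are placeholders; assumes\n # n_epochs=100, so the default val_interval is 10 and epoch 10 prints):\n # ==== Training progress 10.00% ====\n # Epoch 10\n # Training loss: 0.123456, Validation loss: 0.234567\n 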
print(\"==== Training progress {:.2f}% ====\".format(100 * (epoch_idx + 1) / n_epochs))\n print('Epoch {}'.format(epoch_idx + 1))\n print(\"Training loss: {:.6f}, Validation loss: {:.6f}\".format(train_loss, val_loss))\n print()\n # Free memory\n del expert_obs, expert_actions\n if self.verbose > 0:\n print(\"Pretraining done.\")\n return self\n\n @abstractmethod\n def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name=\"run\",\n reset_num_timesteps=True):\n \"\"\"\n Return a trained model.\n\n :param total_timesteps: (int) The total number of samples to train on\n :param callback: (Union[callable, [callable], BaseCallback])\n function called at every steps with state of the algorithm.\n It takes the local and global variables. If it returns False, training is aborted.\n When the callback inherits from BaseCallback, you will have access\n to additional stages of the training (training start/end),\n please read the documentation for more details.\n :param log_interval: (int) The number of timesteps before logging.\n :param tb_log_name: (str) the name of the run for tensorboard log\n :param reset_num_timesteps: (bool) whether or not to reset the current timestep number (used in logging)\n :return: (BaseRLModel) the trained model\n \"\"\"\n pass\n\n @abstractmethod\n def predict(self, observation, state=None, mask=None, deterministic=False):\n \"\"\"\n Get the model's action from an observation\n\n :param observation: (np.ndarray) the input observation\n :param state: (np.ndarray) The last states (can be None, used in recurrent policies)\n :param mask: (np.ndarray) The last masks (can be None, used in recurrent policies)\n :param deterministic: (bool) Whether or not to return deterministic actions.\n :return: (np.ndarray, np.ndarray) the model's action and the next state (used in recurrent policies)\n \"\"\"\n pass\n\n @abstractmethod\n def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):\n \"\"\"\n If ``actions`` is ``None``, then get the model's action probability distribution from a given observation.\n\n Depending on the action space the output is:\n - Discrete: probability for each possible action\n - Box: mean and standard deviation of the action output\n\n However if ``actions`` is not ``None``, this function will return the probability that the given actions are\n taken with the given parameters (observation, state, ...) on this model. For discrete action spaces, it\n returns the probability mass; for continuous action spaces, the probability density. This is since the\n probability mass will always be zero in continuous spaces, see http://blog.christianperone.com/2019/01/\n for a good explanation\n\n :param observation: (np.ndarray) the input observation\n :param state: (np.ndarray) The last states (can be None, used in recurrent policies)\n :param mask: (np.ndarray) The last masks (can be None, used in recurrent policies)\n :param actions: (np.ndarray) (OPTIONAL) For calculating the likelihood that the given actions are chosen by\n the model for each of the given parameters. 
Must have the same number of actions and observations.\n (set to None to return the complete action probability distribution)\n :param logp: (bool) (OPTIONAL) When specified with actions, returns probability in log-space.\n This has no effect if actions is None.\n :return: (np.ndarray) the model's (log) action probability\n \"\"\"\n pass\n\n def load_parameters(self, load_path_or_dict, exact_match=True, only={}):\n \"\"\"\n Load model parameters from a file or a dictionary\n\n Dictionary keys should be tensorflow variable names, which can be obtained\n with the ``get_parameters`` function. If ``exact_match`` is True, the dictionary\n should contain keys for all of the model's parameters, otherwise a RuntimeError\n is raised. If False, only variables included in the dictionary will be updated.\n\n This does not load the agent's hyper-parameters.\n\n .. warning::\n This function does not update trainer/optimizer variables (e.g. momentum).\n As such, training after using this function may lead to less-than-optimal results.\n\n :param load_path_or_dict: (str or file-like or dict) Save parameter location\n or dict of parameters as variable.name -> ndarrays to be loaded.\n :param exact_match: (bool) If True, expects load dictionary to contain keys for\n all variables in the model. If False, loads parameters only for variables\n mentioned in the dictionary. Defaults to True.\n :param only: (dict) If it contains the key 'policy', only policy ('pi') parameters\n are updated; if it contains 'q' or 'value', only value-function ('values_fn')\n parameters are updated. If empty (default), all given parameters are loaded.\n \"\"\"\n # Make sure we have assign ops\n if self._param_load_ops is None:\n self._setup_load_operations()\n if isinstance(load_path_or_dict, dict):\n # Assume `load_path_or_dict` is dict of variable.name -> ndarrays we want to load\n params = load_path_or_dict\n elif isinstance(load_path_or_dict, list):\n warnings.warn(\"Loading model parameters from a list. This has been replaced \" +\n \"with parameter dictionaries with variable names and parameters. 
\" +\n \"If you are loading from a file, consider re-saving the file.\",\n DeprecationWarning)\n # Assume `load_path_or_dict` is list of ndarrays.\n # Create param dictionary assuming the parameters are in same order\n # as `get_parameter_list` returns them.\n params = dict()\n for i, param_name in enumerate(self._param_load_ops.keys()):\n params[param_name] = load_path_or_dict[i]\n else:\n # Assume a filepath or file-like.\n # Use existing deserializer to load the parameters.\n # We only need the parameters part of the file, so\n # only load that part.\n _, params = BaseRLModel._load_from_file(load_path_or_dict, load_data=False)\n params = dict(params)\n\n feed_dict = {}\n param_update_ops = []\n # Keep track of not-updated variables\n not_updated_variables = set(self._param_load_ops.keys())\n for param_name, param_value in params.items():\n try:\n if 'policy' in only.keys():\n print(\"Only policy parameters will be updated\")\n if 'pi' in param_name.split('/'):\n placeholder, assign_op = self._param_load_ops[param_name]\n feed_dict[placeholder] = param_value\n # Create list of tf.assign operations for sess.run\n param_update_ops.append(assign_op)\n print(\"Loaded param: \",param_name)\n else:\n print(\"Unloaded param: \",param_name)\n # Keep track which variables are updated\n not_updated_variables.remove(param_name)\n elif 'q' in only.keys() or 'value' in only.keys():\n print(\"Only value parameters will be updated\")\n if 'values_fn' in param_name.split('/'):\n placeholder, assign_op = self._param_load_ops[param_name]\n feed_dict[placeholder] = param_value\n # Create list of tf.assign operations for sess.run\n param_update_ops.append(assign_op)\n print(\"Loaded param: \",param_name)\n else:\n print(\"Unloaded param: \",param_name)\n # Keep track which variables are updated\n not_updated_variables.remove(param_name)\n else:\n placeholder, assign_op = self._param_load_ops[param_name]\n feed_dict[placeholder] = param_value\n # Create list of tf.assign operations for sess.run\n param_update_ops.append(assign_op)\n # Keep track which variables are updated\n not_updated_variables.remove(param_name)\n print(\"Loaded param: \",param_name)\n except Exception as e:\n print(\"Param not in graph: \",param_name)\n\n for param_name in not_updated_variables:\n print(\"Unloaded param: \", param_name)\n\n # Check that we updated all parameters if exact_match=True\n if exact_match and len(not_updated_variables) > 0:\n raise RuntimeError(\"Load dictionary did not contain all variables. \" +\n \"Missing variables: {}\".format(\", \".join(not_updated_variables)))\n\n self.sess.run(param_update_ops, feed_dict=feed_dict)\n\n @abstractmethod\n def save(self, save_path, cloudpickle=False):\n \"\"\"\n Save the current parameters to file\n\n :param save_path: (str or file-like) The save location\n :param cloudpickle: (bool) Use older cloudpickle format instead of zip-archives.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n @abstractmethod\n def load(cls, load_path, policy=None, env=None, custom_objects=None, **kwargs):\n \"\"\"\n Load the model from file\n\n :param load_path: (str or file-like) the saved parameter location\n :param env: (Gym Environment) the new environment to run the loaded model on\n (can be None if you only need prediction from a trained model)\n :param custom_objects: (dict) Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. 
Similar to custom_objects in\n `keras.models.load_model`. Useful when you have an object in\n file that can not be deserialized.\n :param kwargs: extra arguments to change the model when loading\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def pretrainer_load(model, env, **kwargs):\n \"\"\"\n Construct a trainer from the policy structure\n\n :param model: (Algorithm Class) class of an Algorithm which includes a primitives\n dictionary (obs/act/layer-structure info keyed by primitive name) as a member variable\n :param env: (Gym Environment) the new environment to run the loaded model on\n :param kwargs: extra arguments to change the model when loading\n \"\"\"\n if 'loaded' in model.primitives.keys():\n data = {'observation_space': model.primitives['loaded']['obs'][0], \\\n 'action_space': model.primitives['loaded']['act'][0]}\n else:\n # Check the existence of 'train/weight' in primitives\n weight_name = model.weight_check(model.primitives, \n model.composite_primitive_name, \n model.top_hierarchy_level)\n \n # get total_obs_bound, total_act_bound\n ranges = model.range_primitive(model.primitives, \n model.composite_primitive_name, \n model.top_hierarchy_level)\n model.primitives[weight_name]['composite_action_index'] = list(range(len(ranges[1][0])))\n\n # data = {'observation_space': gym.spaces.Box(ranges[0][0], ranges[0][1], dtype=np.float32), \\\n # 'action_space': gym.spaces.Box(ranges[1][0], ranges[1][1], dtype=np.float32)}\n data = {'action_space': gym.spaces.Box(ranges[1][0], ranges[1][1], dtype=np.float32)}\n \n model.__dict__.update(kwargs)\n model.__dict__.update(data)\n \n model.set_env(env)\n model.setup_HPC_model(model.primitives)\n\n model.load_parameters(model.primitives['pretrained_param'][1], exact_match=False)\n model.primitives.pop('pretrained_param')\n for tail in model.tails:\n model.primitives[tail]['main_tail'] = False\n\n \n @staticmethod\n def weight_check(primitives: dict, composite_primitive_name: str, level: int):\n '''\n Check the existence of 'train/weight' in the primitive dict\n\n :param primitives: (dict) obs/act/structure info of primitives\n :param composite_primitive_name: (str) name of the composite primitive\n :param level: (int) top hierarchy level of the composite primitive\n '''\n weight_name = 'level'+str(level)+'_'+composite_primitive_name+\"/weight\"\n assert weight_name in primitives.keys(), \\\n '\\n\\t\\033[91m[ERROR]: No weight at the top level hierarchy. 
YOU MUST HAVE IT\\033[0m'\n \n return weight_name\n \n @staticmethod\n def range_primitive(primitives: dict, composite_primitive_name: str, level: int) -> list:\n '''\n Return range bounds of total observation/action space\n\n :param primitives: (dict) obs/act/structure info of primitives\n :return: ([2x2 np.array]): \n dims[0][0] = obs min np.array, dims[0][1] = obs max np.array\n dims[1][0] = act min np.array, dims[1][1] = act max np.array\n '''\n weight_name = 'level'+str(level)+'_'+composite_primitive_name+\"/weight\"\n # obs_dim = len(primitives[weight_name]['obs'][1]) # dimension = last index + 1\n obs_dim = primitives[weight_name]['obs'][1][-1]+1 # dimension = last index + 1\n obs_min_array = np.array([-float('inf')]*obs_dim)\n obs_max_array = np.array([float('inf')]*obs_dim)\n\n act_dim = 0\n for name, info_dict in primitives.items():\n if name != 'pretrained_param' and 'weight' not in name.split('/'):\n act_dim = max(act_dim, info_dict['act'][1][-1]+1)\n act_min_array = np.array([-float('inf')]*act_dim)\n act_max_array = np.array([float('inf')]*act_dim)\n\n for name, info_dict in primitives.items():\n if name != 'pretrained_param' and 'weight' not in name.split('/'):\n # print(\"Updating primitive range: \", name)\n # for i, idx in enumerate(info_dict['obs'][1]):\n # obs_min_prim = info_dict['obs'][0].low[idx]\n # obs_max_prim = info_dict['obs'][0].high[idx]\n # if obs_min_array[idx] not in [-float('inf'), obs_min_prim]:\n # print(\"\\n\\t\\033[93m[WARNING]: \\\n # You are about to overwrite dim{2} min bound of obs[{0:2.3f}] with {1:2.3f}\\033[0m\". \\\n # format(obs_min_array[idx], obs_min_prim, idx))\n # obs_min_array[idx] = obs_min_prim\n # if obs_max_array[idx] not in [float('inf'), obs_max_prim]:\n # print(\"\\n\\t\\033[93m[WARNING]: \\\n # You are about to overwrite dim{2} max bound of obs[{0:2.3f}] with {1:2.3f}\\033[0m\". \\\n # format(obs_max_array[idx], obs_max_prim, idx))\n # obs_max_array[idx] = obs_max_prim\n # for i, idx in enumerate(info_dict['act'][1]):\n # act_min_prim = info_dict['act'][0].low[idx]\n # act_max_prim = info_dict['act'][0].high[idx]\n # if act_min_array[idx] not in [-float('inf'), act_min_prim]:\n # print(\"\\n\\t\\033[93m[WARNING]: \\\n # You are about to overwrite dim{2} min bound of act[{0:2.3f}] with {1:2.3f}\\033[0m\". \\\n # format(act_min_array[idx], act_min_prim, idx))\n # act_min_array[idx] = act_min_prim\n # if act_max_array[idx] not in [float('inf'), act_max_prim]:\n # print(\"\\n\\t\\033[93m[WARNING]: \\\n # You are about to overwrite dim{2} max bound of act[{0:2.3f}] with {1:2.3f}\\033[0m\". 
\\\n # format(act_max_array[idx], act_max_prim, idx))\n # act_max_array[idx] = act_max_prim\n pass\n ranges = [[obs_min_array, obs_max_array], [act_min_array, act_max_array]]\n return ranges\n\n @staticmethod\n def _save_to_file_cloudpickle(save_path, data=None, params=None):\n \"\"\"Legacy code for saving models with cloudpickle\n\n :param save_path: (str or file-like) Where to store the model\n :param data: (OrderedDict) Class parameters being stored\n :param params: (OrderedDict) Model parameters being stored\n \"\"\"\n if isinstance(save_path, str):\n _, ext = os.path.splitext(save_path)\n if ext == \"\":\n save_path += \".pkl\"\n\n with open(save_path, \"wb\") as file_:\n cloudpickle.dump((data, params), file_)\n else:\n # Here save_path is a file-like object, not a path\n cloudpickle.dump((data, params), save_path)\n\n @staticmethod\n def _save_to_file_zip(save_path, data=None, params=None):\n \"\"\"Save model to a .zip archive\n\n :param save_path: (str or file-like) Where to store the model\n :param data: (OrderedDict) Class parameters being stored\n :param params: (OrderedDict) Model parameters being stored\n \"\"\"\n # data/params can be None, so do not\n # try to serialize them blindly\n if data is not None:\n serialized_data = data_to_json(data)\n if params is not None:\n serialized_params = params_to_bytes(params)\n # We also have to store list of the parameters\n # to store the ordering for OrderedDict.\n # We can trust these to be strings as they\n # are taken from the Tensorflow graph.\n serialized_param_list = json.dumps(\n list(params.keys()),\n indent=4\n )\n\n # Check postfix if save_path is a string\n if isinstance(save_path, str):\n _, ext = os.path.splitext(save_path)\n if ext == \"\":\n save_path += \".zip\"\n\n # Create a zip-archive and write our objects\n # there. 
This works when save_path\n # is either str or a file-like\n with zipfile.ZipFile(save_path, \"w\") as file_:\n # Do not try to save \"None\" elements\n if data is not None:\n file_.writestr(\"data\", serialized_data)\n if params is not None:\n file_.writestr(\"parameters\", serialized_params)\n file_.writestr(\"parameter_list\", serialized_param_list)\n\n @staticmethod\n def _save_to_file(save_path, data=None, params=None, cloudpickle=False):\n \"\"\"Save model to a zip archive or cloudpickle file.\n\n :param save_path: (str or file-like) Where to store the model\n :param data: (OrderedDict) Class parameters being stored\n :param params: (OrderedDict) Model parameters being stored\n :param cloudpickle: (bool) Use old cloudpickle format\n (stable-baselines<=2.7.0) instead of a zip archive.\n \"\"\"\n if cloudpickle:\n BaseRLModel._save_to_file_cloudpickle(save_path, data, params)\n else:\n BaseRLModel._save_to_file_zip(save_path, data, params)\n\n @staticmethod\n def _load_from_file_cloudpickle(load_path):\n \"\"\"Legacy code for loading older models stored with cloudpickle\n\n :param load_path: (str or file-like) where from to load the file\n :return: (dict, OrderedDict) Class parameters and model parameters\n \"\"\"\n if isinstance(load_path, str):\n if not os.path.exists(load_path):\n if os.path.exists(load_path + \".pkl\"):\n load_path += \".pkl\"\n else:\n raise ValueError(\"Error: the file {} could not be found\".format(load_path))\n\n with open(load_path, \"rb\") as file_:\n data, params = cloudpickle.load(file_)\n else:\n # Here load_path is a file-like object, not a path\n data, params = cloudpickle.load(load_path)\n\n return data, params\n\n @staticmethod\n def _load_from_file(load_path, load_data=True, custom_objects=None):\n \"\"\"Load model data from a .zip archive\n\n :param load_path: (str or file-like) Where to load model from\n :param load_data: (bool) Whether we should load and return data\n (class parameters). Mainly used by `load_parameters` to\n only load model parameters (weights).\n :param custom_objects: (dict) Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n `keras.models.load_model`. 
Useful when you have an object in\n file that can not be deserialized.\n :return: (dict, OrderedDict) Class parameters and model parameters\n \"\"\"\n # Check if file exists if load_path is\n # a string\n if isinstance(load_path, str):\n if not os.path.exists(load_path):\n if os.path.exists(load_path + \".zip\"):\n load_path += \".zip\"\n else:\n raise ValueError(\"Error: the file {} could not be found\".format(load_path))\n\n # Open the zip archive and load data.\n try:\n with zipfile.ZipFile(load_path, \"r\") as file_:\n namelist = file_.namelist()\n # If data or parameters is not in the\n # zip archive, assume they were stored\n # as None (_save_to_file allows this).\n data = None\n params = None\n if \"data\" in namelist and load_data:\n # Load class parameters and convert to string\n # (Required for json library in Python 3.5)\n json_data = file_.read(\"data\").decode()\n data = json_to_data(json_data, custom_objects=custom_objects)\n\n if \"parameters\" in namelist:\n # Load parameter list and parameters\n parameter_list_json = file_.read(\"parameter_list\").decode()\n parameter_list = json.loads(parameter_list_json)\n serialized_params = file_.read(\"parameters\")\n params = bytes_to_params(\n serialized_params, parameter_list\n )\n except zipfile.BadZipFile:\n # load_path wasn't a zip file. Possibly a cloudpickle\n # file. Show a warning and fall back to loading cloudpickle.\n warnings.warn(\"It appears you are loading from a file with the old format. \" +\n \"Older cloudpickle format has been replaced with zip-archived \" +\n \"models. Consider saving the model with the new format.\",\n DeprecationWarning)\n # Attempt loading with the cloudpickle format.\n # If load_path is file-like, seek back to beginning of file\n if not isinstance(load_path, str):\n load_path.seek(0)\n data, params = BaseRLModel._load_from_file_cloudpickle(load_path)\n\n return data, params\n\n @staticmethod\n def _softmax(x_input):\n \"\"\"\n An implementation of softmax.\n\n :param x_input: (numpy float) input vector\n :return: (numpy float) output vector\n \"\"\"\n x_exp = np.exp(x_input.T - np.max(x_input.T, axis=0))\n return (x_exp / x_exp.sum(axis=0)).T\n\n @staticmethod\n def _is_vectorized_observation(observation, observation_space):\n \"\"\"\n For every observation type, detects and validates the shape,\n then returns whether or not the observation is vectorized.\n\n :param observation: (np.ndarray) the input observation to validate\n :param observation_space: (gym.spaces) the observation space\n :return: (bool) whether the given observation is vectorized or not\n \"\"\"\n if isinstance(observation_space, gym.spaces.Box):\n if observation.shape == observation_space.shape:\n return False\n elif observation.shape[1:] == observation_space.shape:\n return True\n else:\n raise ValueError(\"Error: Unexpected observation shape {} for \".format(observation.shape) +\n \"Box environment, please use {} \".format(observation_space.shape) +\n \"or (n_env, {}) for the observation shape.\"\n .format(\", \".join(map(str, observation_space.shape))))\n elif isinstance(observation_space, gym.spaces.Discrete):\n if observation.shape == (): # A numpy array of a number, has shape empty tuple '()'\n return False\n elif len(observation.shape) == 1:\n return True\n else:\n raise ValueError(\"Error: Unexpected observation shape {} for \".format(observation.shape) +\n \"Discrete environment, please use (1,) or (n_env, 1) for the observation shape.\")\n elif isinstance(observation_space, gym.spaces.MultiDiscrete):\n if observation.shape == (len(observation_space.nvec),):\n return False\n elif len(observation.shape) == 2 and observation.shape[1] == len(observation_space.nvec):\n return True\n else:\n raise ValueError(\"Error: Unexpected observation shape {} for MultiDiscrete \".format(observation.shape) +\n \"environment, please use ({},) or \".format(len(observation_space.nvec)) +\n \"(n_env, {}) for the observation shape.\".format(len(observation_space.nvec)))\n elif isinstance(observation_space, gym.spaces.MultiBinary):\n if observation.shape == (observation_space.n,):\n return False\n elif len(observation.shape) == 2 and observation.shape[1] == observation_space.n:\n return True\n else:\n raise ValueError(\"Error: Unexpected observation shape {} for MultiBinary \".format(observation.shape) +\n \"environment, please use ({},) or \".format(observation_space.n) +\n \"(n_env, {}) for the observation shape.\".format(observation_space.n))\n else:\n raise ValueError(\"Error: Cannot determine if the observation is vectorized with the space type {}.\"\n .format(observation_space))\n\n\nclass ActorCriticRLModel(BaseRLModel):\n \"\"\"\n The base class for Actor-Critic models\n\n :param policy: (BasePolicy) Policy object\n :param env: (Gym environment) The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param policy_base: (BasePolicy) the base policy used by this method (default=ActorCriticPolicy)\n :param requires_vec_env: (bool) Does this model require a vectorized environment\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. 
Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n\n def __init__(self, policy, env, _init_setup_model, verbose=0, policy_base=ActorCriticPolicy,\n requires_vec_env=False, policy_kwargs=None, seed=None, n_cpu_tf_sess=None, composite_primitive_name=None):\n super(ActorCriticRLModel, self).__init__(policy, env, verbose=verbose, requires_vec_env=requires_vec_env,\n policy_base=policy_base, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess, composite_primitive_name=composite_primitive_name)\n self.sess = None\n self.initial_state = None\n self.step = None\n self.proba_step = None\n self.params = None\n self._runner = None\n\n def _make_runner(self) -> AbstractEnvRunner:\n \"\"\"Builds a new Runner.\n\n Lazily called whenever `self.runner` is accessed and `self._runner is None`.\n \"\"\"\n raise NotImplementedError(\"This model is not configured to use a Runner\")\n\n @property\n def runner(self) -> AbstractEnvRunner:\n if self._runner is None:\n self._runner = self._make_runner()\n return self._runner\n\n def set_env(self, env):\n self._runner = None # New environment invalidates `self._runner`.\n super().set_env(env)\n\n @abstractmethod\n def setup_model(self):\n pass\n\n @abstractmethod\n def learn(self, total_timesteps, callback=None,\n log_interval=100, tb_log_name=\"run\", reset_num_timesteps=True):\n pass\n\n def predict(self, observation, state=None, mask=None, deterministic=False):\n if state is None:\n state = self.initial_state\n if mask is None:\n mask = [False for _ in range(self.n_envs)]\n observation = np.array(observation)\n vectorized_env = self._is_vectorized_observation(observation, self.observation_space)\n\n observation = observation.reshape((-1,) + self.observation_space.shape)\n actions, _, states, _ = self.step(observation, state, mask, deterministic=deterministic)\n \n clipped_actions = actions\n\n if not vectorized_env:\n if state is not None:\n raise ValueError(\"Error: The environment must be vectorized when using recurrent policies.\")\n clipped_actions = clipped_actions[0]\n\n return clipped_actions, states\n\n\n def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):\n if state is None:\n state = self.initial_state\n if mask is None:\n mask = [False for _ in range(self.n_envs)]\n observation = np.array(observation)\n vectorized_env = self._is_vectorized_observation(observation, self.observation_space)\n\n observation = observation.reshape((-1,) + self.observation_space.shape)\n actions_proba = self.proba_step(observation, state, mask)\n\n if len(actions_proba) == 0: # empty list means not implemented\n warnings.warn(\"Warning: action probability is not implemented for {} action space. 
Returning None.\"\n .format(type(self.action_space).__name__))\n return None\n\n if actions is not None: # comparing the action distribution, to given actions\n prob = None\n logprob = None\n actions = np.array([actions])\n if isinstance(self.action_space, gym.spaces.Discrete):\n actions = actions.reshape((-1,))\n assert observation.shape[0] == actions.shape[0], \\\n \"Error: batch sizes differ for actions and observations.\"\n prob = actions_proba[np.arange(actions.shape[0]), actions]\n\n elif isinstance(self.action_space, gym.spaces.MultiDiscrete):\n actions = actions.reshape((-1, len(self.action_space.nvec)))\n assert observation.shape[0] == actions.shape[0], \\\n \"Error: batch sizes differ for actions and observations.\"\n # Discrete action probability, over multiple categories\n actions = np.swapaxes(actions, 0, 1) # swap axis for easier categorical split\n prob = np.prod([proba[np.arange(act.shape[0]), act]\n for proba, act in zip(actions_proba, actions)], axis=0)\n\n elif isinstance(self.action_space, gym.spaces.MultiBinary):\n actions = actions.reshape((-1, self.action_space.n))\n assert observation.shape[0] == actions.shape[0], \\\n \"Error: batch sizes differ for actions and observations.\"\n # Bernoulli action probability, for every action\n prob = np.prod(actions_proba * actions + (1 - actions_proba) * (1 - actions), axis=1)\n\n elif isinstance(self.action_space, gym.spaces.Box):\n actions = actions.reshape((-1, ) + self.action_space.shape)\n mean, logstd = actions_proba\n std = np.exp(logstd)\n\n n_elts = np.prod(mean.shape[1:]) # first dimension is batch size\n log_normalizer = n_elts/2 * np.log(2 * np.pi) + np.sum(logstd, axis=1)\n\n # Diagonal Gaussian action probability, for every action\n logprob = -np.sum(np.square(actions - mean) / (2 * np.square(std)), axis=1) - log_normalizer\n\n else:\n warnings.warn(\"Warning: action_probability not implemented for {} actions space. Returning None.\"\n .format(type(self.action_space).__name__))\n return None\n\n # Return in space (log or normal) requested by user, converting if necessary\n if logp:\n if logprob is None:\n logprob = np.log(prob)\n ret = logprob\n else:\n if prob is None:\n prob = np.exp(logprob)\n ret = prob\n\n # normalize action proba shape for the different gym spaces\n ret = ret.reshape((-1, 1))\n else:\n ret = actions_proba\n\n if not vectorized_env:\n if state is not None:\n raise ValueError(\"Error: The environment must be vectorized when using recurrent policies.\")\n ret = ret[0]\n\n return ret\n\n def get_parameter_list(self):\n return self.params\n\n @abstractmethod\n def save(self, save_path, cloudpickle=False):\n pass\n\n @classmethod\n def load(cls, load_path, env=None, custom_objects=None, **kwargs):\n \"\"\"\n Load the model from file\n\n :param load_path: (str or file-like) the saved parameter location\n :param env: (Gym Environment) the new environment to run the loaded model on\n (can be None if you only need prediction from a trained model)\n :param custom_objects: (dict) Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n `keras.models.load_model`. 
Useful when you have an object in\n file that can not be deserialized.\n :param kwargs: extra arguments to change the model when loading\n \"\"\"\n data, params = cls._load_from_file(load_path, custom_objects=custom_objects)\n\n if 'policy_kwargs' in kwargs and kwargs['policy_kwargs'] != data['policy_kwargs']:\n raise ValueError(\"The specified policy kwargs do not equal the stored policy kwargs. \"\n \"Stored kwargs: {}, specified kwargs: {}\".format(data['policy_kwargs'],\n kwargs['policy_kwargs']))\n\n model = cls(policy=data[\"policy\"], env=None, _init_setup_model=False) # pytype: disable=not-instantiable\n model.__dict__.update(data)\n model.__dict__.update(kwargs)\n model.set_env(env)\n model.setup_model()\n\n model.load_parameters(params)\n\n return model\n\n\nclass OffPolicyRLModel(BaseRLModel):\n \"\"\"\n The base class for off policy RL model\n\n :param policy: (BasePolicy) Policy object\n :param env: (Gym environment) The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param replay_buffer: (ReplayBuffer) the type of replay buffer\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param requires_vec_env: (bool) Does this model require a vectorized environment\n :param policy_base: (BasePolicy) the base policy used by this method\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n\n def __init__(self, policy, env, replay_buffer=None, _init_setup_model=False, verbose=0, tensorboard_log=None,\n requires_vec_env=False, policy_base=None,\n policy_kwargs=None, seed=None, n_cpu_tf_sess=None, composite_primitive_name=None):\n super(OffPolicyRLModel, self).__init__(policy, env, verbose=verbose, requires_vec_env=requires_vec_env,\n policy_base=policy_base, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess, composite_primitive_name=composite_primitive_name)\n\n self.replay_buffer = replay_buffer\n self.tensorboard_log = None\n\n @abstractmethod\n def setup_model(self):\n pass\n\n @abstractmethod\n def learn(self, total_timesteps, callback=None,\n log_interval=100, tb_log_name=\"run\", reset_num_timesteps=True, replay_wrapper=None):\n pass\n\n @abstractmethod\n def predict(self, observation, state=None, mask=None, deterministic=False):\n pass\n\n @abstractmethod\n def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):\n pass\n\n @abstractmethod\n def save(self, save_path, cloudpickle=False):\n pass\n\n @classmethod\n def load(cls, load_path, policy=None, env=None, custom_objects=None, **kwargs):\n \"\"\"\n Load the model from file\n\n :param load_path: (str or file-like) the saved parameter location\n :param env: (Gym Environment) the new environment to run the loaded model on\n (can be None if you only need prediction from a trained model)\n :param custom_objects: (dict) Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n `keras.models.load_model`. 
Useful when you have an object in\n file that can not be deserialized.\n :param kwargs: extra arguments to change the model when loading\n \"\"\"\n data, params = cls._load_from_file(load_path, custom_objects=custom_objects)\n if 'policy_kwargs' in kwargs and kwargs['policy_kwargs'] != data['policy_kwargs']:\n raise ValueError(\"The specified policy kwargs do not equal the stored policy kwargs. \"\n \"Stored kwargs: {}, specified kwargs: {}\".format(data['policy_kwargs'],\n kwargs['policy_kwargs']))\n if policy is not None:\n policy_model = policy\n print(data)\n data.pop('policy')\n else:\n policy_model = data['policy']\n model = cls(policy=policy_model, env=None, _init_setup_model=False)\n model.__dict__.update(data)\n model.__dict__.update(kwargs)\n \n # NOTE: Once loaded, type of policy is fixed\n model.set_env(env)\n model.setup_model()\n\n model.load_parameters(params, exact_match=False)\n\n return model\n\n\nclass _UnvecWrapper(VecEnvWrapper):\n def __init__(self, venv):\n \"\"\"\n Unvectorize a vectorized environment, for vectorized environment that only have one environment\n\n :param venv: (VecEnv) the vectorized environment to wrap\n \"\"\"\n super().__init__(venv)\n assert venv.num_envs == 1, \"Error: cannot unwrap a environment wrapper that has more than one environment.\"\n\n def seed(self, seed=None):\n return self.venv.env_method('seed', seed)\n\n def __getattr__(self, attr):\n if attr in self.__dict__:\n return getattr(self, attr)\n return getattr(self.venv, attr)\n\n def __set_attr__(self, attr, value):\n if attr in self.__dict__:\n setattr(self, attr, value)\n else:\n setattr(self.venv, attr, value)\n\n def compute_reward(self, achieved_goal, desired_goal, _info):\n return float(self.venv.env_method('compute_reward', achieved_goal, desired_goal, _info)[0])\n\n @staticmethod\n def unvec_obs(obs):\n \"\"\"\n :param obs: (Union[np.ndarray, dict])\n :return: (Union[np.ndarray, dict])\n \"\"\"\n if not isinstance(obs, dict):\n return obs[0]\n obs_ = OrderedDict()\n for key in obs.keys():\n obs_[key] = obs[key][0]\n del obs\n return obs_\n\n def reset(self):\n return self.unvec_obs(self.venv.reset())\n\n def step_async(self, actions):\n self.venv.step_async([actions])\n\n def step_wait(self):\n obs, rewards, dones, information = self.venv.step_wait()\n return self.unvec_obs(obs), float(rewards[0]), dones[0], information[0]\n\n def render(self, mode='human'):\n return self.venv.render(mode=mode)\n\n\nclass SetVerbosity:\n def __init__(self, verbose=0):\n \"\"\"\n define a region of code for certain level of verbosity\n\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n \"\"\"\n self.verbose = verbose\n\n def __enter__(self):\n self.tf_level = os.environ.get('TF_CPP_MIN_LOG_LEVEL', '0')\n self.log_level = logger.get_level()\n self.gym_level = gym.logger.MIN_LEVEL\n\n if self.verbose <= 1:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n if self.verbose <= 0:\n logger.set_level(logger.DISABLED)\n gym.logger.set_level(gym.logger.DISABLED)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.verbose <= 1:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = self.tf_level\n\n if self.verbose <= 0:\n logger.set_level(self.log_level)\n gym.logger.set_level(self.gym_level)\n\n\nclass TensorboardWriter:\n def __init__(self, graph, tensorboard_log_path, tb_log_name, new_tb_log=True):\n \"\"\"\n Create a Tensorboard writer for a code segment, and saves it to the log directory as its own run\n\n :param graph: (Tensorflow Graph) the model graph\n 
:param tensorboard_log_path: (str) the save path for the log (can be None for no logging)\n :param tb_log_name: (str) the name of the run for tensorboard log\n :param new_tb_log: (bool) whether or not to create a new logging folder for tensorboard\n \"\"\"\n self.graph = graph\n self.tensorboard_log_path = tensorboard_log_path\n self.tb_log_name = tb_log_name\n self.writer = None\n self.new_tb_log = new_tb_log\n\n def __enter__(self):\n if self.tensorboard_log_path is not None:\n latest_run_id = self._get_latest_run_id()\n if self.new_tb_log:\n latest_run_id = latest_run_id + 1\n save_path = os.path.join(self.tensorboard_log_path, \"{}_{}\".format(self.tb_log_name, latest_run_id))\n self.writer = tf.summary.FileWriter(save_path, graph=self.graph)\n return self.writer\n\n def _get_latest_run_id(self):\n \"\"\"\n returns the latest run number for the given log name and log path,\n by finding the greatest number in the directories.\n\n :return: (int) latest run number\n \"\"\"\n max_run_id = 0\n for path in glob.glob(\"{}/{}_[0-9]*\".format(self.tensorboard_log_path, self.tb_log_name)):\n file_name = path.split(os.sep)[-1]\n ext = file_name.split(\"_\")[-1]\n if self.tb_log_name == \"_\".join(file_name.split(\"_\")[:-1]) and ext.isdigit() and int(ext) > max_run_id:\n max_run_id = int(ext)\n return max_run_id\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.writer is not None:\n self.writer.add_graph(self.graph)\n self.writer.flush()\n"
]
[ [ "numpy.max", "tensorflow.train.AdamOptimizer", "numpy.exp", "numpy.square", "numpy.swapaxes", "numpy.arange", "tensorflow.stop_gradient", "tensorflow.square", "numpy.zeros", "numpy.log", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.one_hot", "numpy.array", "numpy.sum", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.expand_dims", "numpy.prod", "tensorflow.variable_scope" ] ]
tsaikenyoo/BERT_SQuAD_pytorch
[ "411d9aa286081b398a0f60878ae751654a9cb53f" ]
[ "model/transformer/__main__.py" ]
[ "\nimport torch\nfrom . import *\nfrom .encoder import TransformerEncoder\n\ndef main():\n encoder = TransformerEncoder()\n input = torch.rand(2, 768)\n print(input)\n\n\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "torch.rand" ] ]
ronaldluc/muzero-general
[ "fa84fc0ddc43013e1b67a7ffc741e3433c90b696" ]
[ "models.py" ]
[ "import math\nfrom abc import ABC, abstractmethod\n\nimport torch\n\n\nclass MuZeroNetwork:\n def __new__(cls, config):\n if config.network == \"fullyconnected\":\n return MuZeroFullyConnectedNetwork(\n config.observation_shape,\n config.stacked_observations,\n len(config.action_space),\n config.encoding_size,\n config.fc_reward_layers,\n config.fc_value_layers,\n config.fc_policy_layers,\n config.fc_representation_layers,\n config.fc_dynamics_layers,\n config.support_size,\n )\n elif config.network == \"resnet\":\n return MuZeroResidualNetwork(\n config.observation_shape,\n config.stacked_observations,\n len(config.action_space),\n config.blocks,\n config.channels,\n config.reduced_channels_reward,\n config.reduced_channels_value,\n config.reduced_channels_policy,\n config.resnet_fc_reward_layers,\n config.resnet_fc_value_layers,\n config.resnet_fc_policy_layers,\n config.support_size,\n config.downsample,\n )\n else:\n raise NotImplementedError(\n 'The network parameter should be \"fullyconnected\" or \"resnet\".'\n )\n\n\ndef dict_to_cpu(dictionary):\n cpu_dict = {}\n for key, value in dictionary.items():\n if isinstance(value, torch.Tensor):\n cpu_dict[key] = value.cpu()\n elif isinstance(value, dict):\n cpu_dict[key] = dict_to_cpu(value)\n else:\n cpu_dict[key] = value\n return cpu_dict\n\n\nclass AbstractNetwork(ABC, torch.nn.Module):\n def __init__(self):\n super().__init__()\n pass\n\n @abstractmethod\n def initial_inference(self, observation):\n pass\n\n @abstractmethod\n def recurrent_inference(self, encoded_state, action):\n pass\n\n def get_weights(self):\n return dict_to_cpu(self.state_dict())\n\n def set_weights(self, weights):\n self.load_state_dict(weights)\n\n\n##################################\n######## Fully Connected #########\n\n\nclass MuZeroFullyConnectedNetwork(AbstractNetwork):\n def __init__(\n self,\n observation_shape,\n stacked_observations,\n action_space_size,\n encoding_size,\n fc_reward_layers,\n fc_value_layers,\n fc_policy_layers,\n fc_representation_layers,\n fc_dynamics_layers,\n support_size,\n ):\n super().__init__()\n self.action_space_size = action_space_size\n self.full_support_size = 2 * support_size + 1\n\n self.representation_network = torch.nn.DataParallel(\n mlp(\n observation_shape[0]\n * observation_shape[1]\n * observation_shape[2]\n * (stacked_observations + 1)\n + stacked_observations * observation_shape[1] * observation_shape[2],\n fc_representation_layers,\n encoding_size,\n )\n )\n\n self.dynamics_encoded_state_network = torch.nn.DataParallel(\n mlp(\n encoding_size + self.action_space_size,\n fc_dynamics_layers,\n encoding_size,\n )\n )\n self.dynamics_reward_network = torch.nn.DataParallel(\n mlp(encoding_size, fc_reward_layers, self.full_support_size)\n )\n\n self.prediction_policy_network = torch.nn.DataParallel(\n mlp(encoding_size, fc_policy_layers, self.action_space_size)\n )\n self.prediction_value_network = torch.nn.DataParallel(\n mlp(encoding_size, fc_value_layers, self.full_support_size)\n )\n\n def prediction(self, encoded_state):\n policy_logits = self.prediction_policy_network(encoded_state)\n value = self.prediction_value_network(encoded_state)\n return policy_logits, value\n\n def representation(self, observation):\n encoded_state = self.representation_network(\n observation.view(observation.shape[0], -1)\n )\n # Scale encoded state between [0, 1] (See appendix paper Training)\n min_encoded_state = encoded_state.min(1, keepdim=True)[0]\n max_encoded_state = encoded_state.max(1, keepdim=True)[0]\n scale_encoded_state 
= max_encoded_state - min_encoded_state\n scale_encoded_state[scale_encoded_state < 1e-5] += 1e-5\n encoded_state_normalized = (\n encoded_state - min_encoded_state\n ) / scale_encoded_state\n return encoded_state_normalized\n\n def dynamics(self, encoded_state, action):\n # Stack encoded_state with a game specific one hot encoded action (See paper appendix Network Architecture)\n action_one_hot = (\n torch.zeros((action.shape[0], self.action_space_size))\n .to(action.device)\n .float()\n )\n action_one_hot.scatter_(1, action.long(), 1.0)\n x = torch.cat((encoded_state, action_one_hot), dim=1)\n\n next_encoded_state = self.dynamics_encoded_state_network(x)\n\n reward = self.dynamics_reward_network(next_encoded_state)\n\n # Scale encoded state between [0, 1] (See paper appendix Training)\n min_next_encoded_state = next_encoded_state.min(1, keepdim=True)[0]\n max_next_encoded_state = next_encoded_state.max(1, keepdim=True)[0]\n scale_next_encoded_state = max_next_encoded_state - min_next_encoded_state\n scale_next_encoded_state[scale_next_encoded_state < 1e-5] += 1e-5\n next_encoded_state_normalized = (\n next_encoded_state - min_next_encoded_state\n ) / scale_next_encoded_state\n\n return next_encoded_state_normalized, reward\n\n def initial_inference(self, observation):\n encoded_state = self.representation(observation)\n policy_logits, value = self.prediction(encoded_state)\n # reward equal to 0 for consistency\n reward = torch.log(\n (\n torch.zeros(1, self.full_support_size)\n .scatter(1, torch.tensor([[self.full_support_size // 2]]).long(), 1.0)\n .repeat(len(observation), 1)\n .to(observation.device)\n )\n )\n\n return (\n value,\n reward,\n policy_logits,\n encoded_state,\n )\n\n def recurrent_inference(self, encoded_state, action):\n next_encoded_state, reward = self.dynamics(encoded_state, action)\n policy_logits, value = self.prediction(next_encoded_state)\n return value, reward, policy_logits, next_encoded_state\n\n\n###### End Fully Connected #######\n##################################\n\n\n##################################\n############# ResNet #############\n\n\ndef conv3x3(in_channels, out_channels, stride=1):\n return torch.nn.Conv2d(\n in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False\n )\n\n\n# Residual block\nclass ResidualBlock(torch.nn.Module):\n def __init__(self, num_channels, stride=1):\n super().__init__()\n self.conv1 = conv3x3(num_channels, num_channels, stride)\n self.bn1 = torch.nn.BatchNorm2d(num_channels)\n self.conv2 = conv3x3(num_channels, num_channels)\n self.bn2 = torch.nn.BatchNorm2d(num_channels)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = torch.nn.functional.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out += x\n out = torch.nn.functional.relu(out)\n return out\n\n\n# Downsample observations before representation network (See paper appendix Network Architecture)\nclass DownSample(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.conv1 = torch.nn.Conv2d(\n in_channels,\n out_channels // 2,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=False,\n )\n self.resblocks1 = torch.nn.ModuleList(\n [ResidualBlock(out_channels // 2) for _ in range(2)]\n )\n self.conv2 = torch.nn.Conv2d(\n out_channels // 2,\n out_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=False,\n )\n self.resblocks2 = torch.nn.ModuleList(\n [ResidualBlock(out_channels) for _ in range(3)]\n )\n self.pooling1 = torch.nn.AvgPool2d(kernel_size=3, stride=2, 
padding=1)\n self.resblocks3 = torch.nn.ModuleList(\n [ResidualBlock(out_channels) for _ in range(3)]\n )\n self.pooling2 = torch.nn.AvgPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n x = self.conv1(x)\n for block in self.resblocks1:\n x = block(x)\n x = self.conv2(x)\n for block in self.resblocks2:\n x = block(x)\n x = self.pooling1(x)\n for block in self.resblocks3:\n x = block(x)\n x = self.pooling2(x)\n return x\n\n\nclass DownsampleCNN(torch.nn.Module):\n def __init__(self, in_channels, out_channels, h_w):\n super().__init__()\n mid_channels = (in_channels + out_channels) // 2\n self.features = torch.nn.Sequential(\n torch.nn.Conv2d(\n in_channels, mid_channels, kernel_size=h_w[0] * 2, stride=4, padding=2\n ),\n torch.nn.ReLU(inplace=True),\n torch.nn.MaxPool2d(kernel_size=3, stride=2),\n torch.nn.Conv2d(mid_channels, out_channels, kernel_size=5, padding=2),\n torch.nn.ReLU(inplace=True),\n torch.nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.avgpool = torch.nn.AdaptiveAvgPool2d(h_w)\n\n def forward(self, x):\n x = self.features(x)\n x = self.avgpool(x)\n return x\n\n\nclass RepresentationNetwork(torch.nn.Module):\n def __init__(\n self,\n observation_shape,\n stacked_observations,\n num_blocks,\n num_channels,\n downsample,\n ):\n super().__init__()\n self.downsample = downsample\n if self.downsample:\n if self.downsample == \"resnet\":\n self.downsample_net = DownSample(\n observation_shape[0] * (stacked_observations + 1)\n + stacked_observations,\n num_channels,\n )\n elif self.downsample == \"CNN\":\n self.downsample_net = DownsampleCNN(\n observation_shape[0] * (stacked_observations + 1)\n + stacked_observations,\n num_channels,\n (\n math.ceil(observation_shape[1] / 16),\n math.ceil(observation_shape[2] / 16),\n ),\n )\n else:\n raise NotImplementedError('downsample should be \"resnet\" or \"CNN\".')\n self.conv = conv3x3(\n observation_shape[0] * (stacked_observations + 1) + stacked_observations,\n num_channels,\n )\n self.bn = torch.nn.BatchNorm2d(num_channels)\n self.resblocks = torch.nn.ModuleList(\n [ResidualBlock(num_channels) for _ in range(num_blocks)]\n )\n\n def forward(self, x):\n if self.downsample:\n x = self.downsample_net(x)\n else:\n x = self.conv(x)\n x = self.bn(x)\n x = torch.nn.functional.relu(x)\n\n for block in self.resblocks:\n x = block(x)\n return x\n\n\nclass DynamicsNetwork(torch.nn.Module):\n def __init__(\n self,\n num_blocks,\n num_channels,\n reduced_channels_reward,\n fc_reward_layers,\n full_support_size,\n block_output_size_reward,\n ):\n super().__init__()\n self.conv = conv3x3(num_channels, num_channels - 1)\n self.bn = torch.nn.BatchNorm2d(num_channels - 1)\n self.resblocks = torch.nn.ModuleList(\n [ResidualBlock(num_channels - 1) for _ in range(num_blocks)]\n )\n\n self.conv1x1_reward = torch.nn.Conv2d(\n num_channels - 1, reduced_channels_reward, 1\n )\n self.block_output_size_reward = block_output_size_reward\n self.fc = mlp(\n self.block_output_size_reward, fc_reward_layers, full_support_size,\n )\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = torch.nn.functional.relu(x)\n for block in self.resblocks:\n x = block(x)\n state = x\n x = self.conv1x1_reward(x)\n x = x.view(-1, self.block_output_size_reward)\n reward = self.fc(x)\n return state, reward\n\n\nclass PredictionNetwork(torch.nn.Module):\n def __init__(\n self,\n action_space_size,\n num_blocks,\n num_channels,\n reduced_channels_value,\n reduced_channels_policy,\n fc_value_layers,\n fc_policy_layers,\n full_support_size,\n 
block_output_size_value,\n block_output_size_policy,\n ):\n super().__init__()\n self.resblocks = torch.nn.ModuleList(\n [ResidualBlock(num_channels) for _ in range(num_blocks)]\n )\n\n self.conv1x1_value = torch.nn.Conv2d(num_channels, reduced_channels_value, 1)\n self.conv1x1_policy = torch.nn.Conv2d(num_channels, reduced_channels_policy, 1)\n self.block_output_size_value = block_output_size_value\n self.block_output_size_policy = block_output_size_policy\n self.fc_value = mlp(\n self.block_output_size_value, fc_value_layers, full_support_size\n )\n self.fc_policy = mlp(\n self.block_output_size_policy, fc_policy_layers, action_space_size,\n )\n\n def forward(self, x):\n for block in self.resblocks:\n x = block(x)\n value = self.conv1x1_value(x)\n policy = self.conv1x1_policy(x)\n value = value.view(-1, self.block_output_size_value)\n policy = policy.view(-1, self.block_output_size_policy)\n value = self.fc_value(value)\n policy = self.fc_policy(policy)\n return policy, value\n\n\nclass MuZeroResidualNetwork(AbstractNetwork):\n def __init__(\n self,\n observation_shape,\n stacked_observations,\n action_space_size,\n num_blocks,\n num_channels,\n reduced_channels_reward,\n reduced_channels_value,\n reduced_channels_policy,\n fc_reward_layers,\n fc_value_layers,\n fc_policy_layers,\n support_size,\n downsample,\n ):\n super().__init__()\n self.action_space_size = action_space_size\n self.full_support_size = 2 * support_size + 1\n block_output_size_reward = (\n (\n reduced_channels_reward\n * math.ceil(observation_shape[1] / 16)\n * math.ceil(observation_shape[2] / 16)\n )\n if downsample\n else (reduced_channels_reward * observation_shape[1] * observation_shape[2])\n )\n\n block_output_size_value = (\n (\n reduced_channels_value\n * math.ceil(observation_shape[1] / 16)\n * math.ceil(observation_shape[2] / 16)\n )\n if downsample\n else (reduced_channels_value * observation_shape[1] * observation_shape[2])\n )\n\n block_output_size_policy = (\n (\n reduced_channels_policy\n * math.ceil(observation_shape[1] / 16)\n * math.ceil(observation_shape[2] / 16)\n )\n if downsample\n else (reduced_channels_policy * observation_shape[1] * observation_shape[2])\n )\n\n self.representation_network = torch.nn.DataParallel(\n RepresentationNetwork(\n observation_shape,\n stacked_observations,\n num_blocks,\n num_channels,\n downsample,\n )\n )\n\n self.dynamics_network = torch.nn.DataParallel(\n DynamicsNetwork(\n num_blocks,\n num_channels + 1,\n reduced_channels_reward,\n fc_reward_layers,\n self.full_support_size,\n block_output_size_reward,\n )\n )\n\n self.prediction_network = torch.nn.DataParallel(\n PredictionNetwork(\n action_space_size,\n num_blocks,\n num_channels,\n reduced_channels_value,\n reduced_channels_policy,\n fc_value_layers,\n fc_policy_layers,\n self.full_support_size,\n block_output_size_value,\n block_output_size_policy,\n )\n )\n\n def prediction(self, encoded_state):\n policy, value = self.prediction_network(encoded_state)\n return policy, value\n\n def representation(self, observation):\n encoded_state = self.representation_network(observation)\n\n # Scale encoded state between [0, 1] (See appendix paper Training)\n min_encoded_state = (\n encoded_state.view(\n -1,\n encoded_state.shape[1],\n encoded_state.shape[2] * encoded_state.shape[3],\n )\n .min(2, keepdim=True)[0]\n .unsqueeze(-1)\n )\n max_encoded_state = (\n encoded_state.view(\n -1,\n encoded_state.shape[1],\n encoded_state.shape[2] * encoded_state.shape[3],\n )\n .max(2, keepdim=True)[0]\n .unsqueeze(-1)\n )\n 
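# Illustration only: a channel whose activations span [-2.0, 6.0] is\n # mapped via (x - (-2.0)) / 8.0 into [0, 1]; the 1e-5 floor below\n # guards the division when a channel is constant (max == min).\n 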
scale_encoded_state = max_encoded_state - min_encoded_state\n scale_encoded_state[scale_encoded_state < 1e-5] += 1e-5\n encoded_state_normalized = (\n encoded_state - min_encoded_state\n ) / scale_encoded_state\n return encoded_state_normalized\n\n def dynamics(self, encoded_state, action):\n # Stack encoded_state with a game specific one hot encoded action (See paper appendix Network Architecture)\n action_one_hot = (\n torch.ones(\n (\n encoded_state.shape[0],\n 1,\n encoded_state.shape[2],\n encoded_state.shape[3],\n )\n )\n .to(action.device)\n .float()\n )\n action_one_hot = (\n action[:, :, None, None] * action_one_hot / self.action_space_size\n )\n x = torch.cat((encoded_state, action_one_hot), dim=1)\n next_encoded_state, reward = self.dynamics_network(x)\n\n # Scale encoded state between [0, 1] (See paper appendix Training)\n min_next_encoded_state = (\n next_encoded_state.view(\n -1,\n next_encoded_state.shape[1],\n next_encoded_state.shape[2] * next_encoded_state.shape[3],\n )\n .min(2, keepdim=True)[0]\n .unsqueeze(-1)\n )\n max_next_encoded_state = (\n next_encoded_state.view(\n -1,\n next_encoded_state.shape[1],\n next_encoded_state.shape[2] * next_encoded_state.shape[3],\n )\n .max(2, keepdim=True)[0]\n .unsqueeze(-1)\n )\n scale_next_encoded_state = max_next_encoded_state - min_next_encoded_state\n scale_next_encoded_state[scale_next_encoded_state < 1e-5] += 1e-5\n next_encoded_state_normalized = (\n next_encoded_state - min_next_encoded_state\n ) / scale_next_encoded_state\n return next_encoded_state_normalized, reward\n\n def initial_inference(self, observation):\n encoded_state = self.representation(observation)\n policy_logits, value = self.prediction(encoded_state)\n # reward equal to 0 for consistency\n reward = torch.log(\n (\n torch.zeros(1, self.full_support_size)\n .scatter(1, torch.tensor([[self.full_support_size // 2]]).long(), 1.0)\n .repeat(len(observation), 1)\n .to(observation.device)\n )\n )\n return (\n value,\n reward,\n policy_logits,\n encoded_state,\n )\n\n def recurrent_inference(self, encoded_state, action):\n next_encoded_state, reward = self.dynamics(encoded_state, action)\n policy_logits, value = self.prediction(next_encoded_state)\n return value, reward, policy_logits, next_encoded_state\n\n\n########### End ResNet ###########\n##################################\n\n\ndef mlp(\n input_size,\n layer_sizes,\n output_size,\n output_activation=torch.nn.Identity,\n activation=torch.nn.ELU,\n):\n sizes = [input_size] + layer_sizes + [output_size]\n layers = []\n for i in range(len(sizes) - 1):\n act = activation if i < len(sizes) - 2 else output_activation\n layers += [torch.nn.Linear(sizes[i], sizes[i + 1]), act()]\n return torch.nn.Sequential(*layers)\n\n\ndef support_to_scalar(logits, support_size):\n \"\"\"\n Transform a categorical representation to a scalar\n See paper appendix Network Architecture\n \"\"\"\n # Decode to a scalar\n probabilities = torch.softmax(logits, dim=1)\n support = (\n torch.tensor([x for x in range(-support_size, support_size + 1)])\n .expand(probabilities.shape)\n .float()\n .to(device=probabilities.device)\n )\n x = torch.sum(support * probabilities, dim=1, keepdim=True)\n\n # Invert the scaling (defined in https://arxiv.org/abs/1805.11593)\n x = torch.sign(x) * (\n ((torch.sqrt(1 + 4 * 0.001 * (torch.abs(x) + 1 + 0.001)) - 1) / (2 * 0.001))\n ** 2\n - 1\n )\n return x\n\n\ndef scalar_to_support(x, support_size):\n \"\"\"\n Transform a scalar to a categorical representation with (2 * support_size + 1) categories\n See 
paper appendix Network Architecture\n \"\"\"\n # Reduce the scale (defined in https://arxiv.org/abs/1805.11593)\n x = torch.sign(x) * (torch.sqrt(torch.abs(x) + 1) - 1) + 0.001 * x\n\n # Encode on a vector\n x = torch.clamp(x, -support_size, support_size)\n floor = x.floor()\n prob = x - floor\n logits = torch.zeros(x.shape[0], x.shape[1], 2 * support_size + 1).to(x.device)\n logits.scatter_(\n 2, (floor + support_size).long().unsqueeze(-1), (1 - prob).unsqueeze(-1)\n )\n indexes = floor + support_size + 1\n prob = prob.masked_fill_(2 * support_size < indexes, 0.0)\n indexes = indexes.masked_fill_(2 * support_size < indexes, 0.0)\n logits.scatter_(2, indexes.long().unsqueeze(-1), prob.unsqueeze(-1))\n return logits\n" ]
[ [ "torch.nn.Sequential", "torch.softmax", "torch.abs", "torch.ones", "torch.cat", "torch.sign", "torch.zeros", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.sum", "torch.tensor", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.functional.relu", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.clamp" ] ]
mmmorks/panda
[ "d020b099fdf1a8e5ecd4d8b5289644e74264b9d7" ]
[ "tests/safety/test_tesla.py" ]
[ "#!/usr/bin/env python3\nimport unittest\nimport numpy as np\nfrom panda import Panda\nimport panda.tests.safety.common as common\nfrom panda.tests.safety import libpandasafety_py\nfrom panda.tests.safety.common import CANPackerPanda\n\nANGLE_DELTA_BP = [0., 5., 15.]\nANGLE_DELTA_V = [5., .8, .15] # windup limit\nANGLE_DELTA_VU = [5., 3.5, 0.4] # unwind limit\n\nMAX_ACCEL = 2.0\nMIN_ACCEL = -3.5\n\nclass CONTROL_LEVER_STATE:\n DN_1ST = 32\n UP_1ST = 16\n DN_2ND = 8\n UP_2ND = 4\n RWD = 2\n FWD = 1\n IDLE = 0\n\ndef sign(a):\n return 1 if a > 0 else -1\n\nclass TestTeslaSafety(common.PandaSafetyTest):\n STANDSTILL_THRESHOLD = 0\n GAS_PRESSED_THRESHOLD = 3\n RELAY_MALFUNCTION_BUS = 0\n FWD_BUS_LOOKUP = {0: 2, 2: 0}\n\n def setUp(self):\n self.packer = None\n raise unittest.SkipTest\n\n def _angle_meas_msg(self, angle):\n values = {\"EPAS_internalSAS\": angle}\n return self.packer.make_can_msg_panda(\"EPAS_sysStatus\", 0, values)\n\n def _set_prev_angle(self, t):\n t = int(t * 10)\n self.safety.set_desired_angle_last(t)\n\n def _angle_meas_msg_array(self, angle):\n for _ in range(6):\n self._rx(self._angle_meas_msg(angle))\n\n def _lkas_control_msg(self, angle, enabled):\n values = {\"DAS_steeringAngleRequest\": angle, \"DAS_steeringControlType\": 1 if enabled else 0}\n return self.packer.make_can_msg_panda(\"DAS_steeringControl\", 0, values)\n\n def _speed_msg(self, speed):\n values = {\"DI_vehicleSpeed\": speed / 0.447}\n return self.packer.make_can_msg_panda(\"DI_torque2\", 0, values)\n\n def _user_brake_msg(self, brake):\n values = {\"driverBrakeStatus\": 2 if brake else 1}\n return self.packer.make_can_msg_panda(\"BrakeMessage\", 0, values)\n\n def _user_gas_msg(self, gas):\n values = {\"DI_pedalPos\": gas}\n return self.packer.make_can_msg_panda(\"DI_torque1\", 0, values)\n\n def _control_lever_cmd(self, command):\n values = {\"SpdCtrlLvr_Stat\": command}\n return self.packer.make_can_msg_panda(\"STW_ACTN_RQ\", 0, values)\n\n def _pcm_status_msg(self, enable):\n values = {\"DI_cruiseState\": 2 if enable else 0}\n return self.packer.make_can_msg_panda(\"DI_state\", 0, values)\n\n def _long_control_msg(self, set_speed, acc_val=0, jerk_limits=(0, 0), accel_limits=(0, 0), aeb_event=0):\n values = {\n \"DAS_setSpeed\": set_speed,\n \"DAS_accState\": acc_val,\n \"DAS_aebEvent\": aeb_event,\n \"DAS_jerkMin\": jerk_limits[0],\n \"DAS_jerkMax\": jerk_limits[1],\n \"DAS_accelMin\": accel_limits[0],\n \"DAS_accelMax\": accel_limits[1],\n }\n return self.packer.make_can_msg_panda(\"DAS_control\", 0, values)\n\nclass TestTeslaSteeringSafety(TestTeslaSafety):\n TX_MSGS = [[0x488, 0], [0x45, 0], [0x45, 2]]\n RELAY_MALFUNCTION_ADDR = 0x488\n FWD_BLACKLISTED_ADDRS = {2: [0x488]}\n\n def setUp(self):\n self.packer = CANPackerPanda(\"tesla_can\")\n self.safety = libpandasafety_py.libpandasafety\n self.safety.set_safety_hooks(Panda.SAFETY_TESLA, 0)\n self.safety.init_tests()\n\n def test_angle_cmd_when_enabled(self):\n # when controls are allowed, angle cmd rate limit is enforced\n speeds = [0., 1., 5., 10., 15., 50.]\n angles = [-300, -100, -10, 0, 10, 100, 300]\n for a in angles:\n for s in speeds:\n max_delta_up = np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_V)\n max_delta_down = np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_VU)\n\n # first test against false positives\n self._angle_meas_msg_array(a)\n self._rx(self._speed_msg(s))\n\n self._set_prev_angle(a)\n self.safety.set_controls_allowed(1)\n\n # Stay within limits\n # Up\n self.assertEqual(True, self._tx(self._lkas_control_msg(a + sign(a) * max_delta_up, 
1)))\n self.assertTrue(self.safety.get_controls_allowed())\n\n # Don't change\n self.assertEqual(True, self._tx(self._lkas_control_msg(a, 1)))\n self.assertTrue(self.safety.get_controls_allowed())\n\n # Down\n self.assertEqual(True, self._tx(self._lkas_control_msg(a - sign(a) * max_delta_down, 1)))\n self.assertTrue(self.safety.get_controls_allowed())\n\n # Inject too high rates\n # Up\n self.assertEqual(False, self._tx(self._lkas_control_msg(a + sign(a) * (max_delta_up + 1.1), 1)))\n\n # Don't change\n self.safety.set_controls_allowed(1)\n self._set_prev_angle(a)\n self.assertTrue(self.safety.get_controls_allowed())\n self.assertEqual(True, self._tx(self._lkas_control_msg(a, 1)))\n self.assertTrue(self.safety.get_controls_allowed())\n\n # Down\n self.assertEqual(False, self._tx(self._lkas_control_msg(a - sign(a) * (max_delta_down + 1.1), 1)))\n\n # Check desired steer should be the same as steer angle when controls are off\n self.safety.set_controls_allowed(0)\n self.assertEqual(True, self._tx(self._lkas_control_msg(a, 0)))\n\n def test_angle_cmd_when_disabled(self):\n self.safety.set_controls_allowed(0)\n\n self._set_prev_angle(0)\n self.assertFalse(self._tx(self._lkas_control_msg(0, 1)))\n self.assertFalse(self.safety.get_controls_allowed())\n\n def test_acc_buttons(self):\n \"\"\"\n FWD (cancel) always allowed.\n \"\"\"\n btns = [\n (CONTROL_LEVER_STATE.FWD, True),\n (CONTROL_LEVER_STATE.RWD, False),\n (CONTROL_LEVER_STATE.UP_1ST, False),\n (CONTROL_LEVER_STATE.UP_2ND, False),\n (CONTROL_LEVER_STATE.DN_1ST, False),\n (CONTROL_LEVER_STATE.DN_2ND, False),\n (CONTROL_LEVER_STATE.IDLE, False),\n ]\n for btn, should_tx in btns:\n for controls_allowed in (True, False):\n self.safety.set_controls_allowed(controls_allowed)\n tx = self._tx(self._control_lever_cmd(btn))\n self.assertEqual(tx, should_tx)\n\n\nclass TestTeslaLongitudinalSafety(TestTeslaSafety):\n def setUp(self):\n raise unittest.SkipTest\n\n def test_no_aeb(self):\n for aeb_event in range(4):\n self.assertEqual(self._tx(self._long_control_msg(10, aeb_event=aeb_event)), aeb_event == 0)\n\n def test_stock_aeb_passthrough(self):\n no_aeb_msg = self._long_control_msg(10, aeb_event=0)\n aeb_msg = self._long_control_msg(10, aeb_event=1)\n\n # stock system sends no AEB -> no forwarding, and OP is allowed to TX\n self.assertEqual(-1, self.safety.safety_fwd_hook(2, no_aeb_msg))\n self.assertEqual(True, self._tx(no_aeb_msg))\n\n # stock system sends AEB -> forwarding, and OP is not allowed to TX\n self.assertEqual(0, self.safety.safety_fwd_hook(2, aeb_msg))\n self.assertEqual(False, self._tx(no_aeb_msg))\n\n # reset global state for next tests\n self.assertEqual(-1, self.safety.safety_fwd_hook(2, no_aeb_msg))\n\n def test_acc_accel_limits(self):\n for controls_allowed in [True, False]:\n self.safety.set_controls_allowed(controls_allowed)\n for min_accel in np.arange(MIN_ACCEL - 1, MAX_ACCEL + 1, 0.1):\n for max_accel in np.arange(MIN_ACCEL - 1, MAX_ACCEL + 1, 0.1):\n # floats might not hit exact boundary conditions without rounding\n min_accel = round(min_accel, 2)\n max_accel = round(max_accel, 2)\n if controls_allowed:\n send = (MIN_ACCEL <= min_accel <= MAX_ACCEL) and (MIN_ACCEL <= max_accel <= MAX_ACCEL)\n else:\n send = np.all(np.isclose([min_accel, max_accel], 0, atol=0.0001))\n self.assertEqual(send, self._tx(self._long_control_msg(10, acc_val=4, accel_limits=[min_accel, max_accel])))\n\nclass TestTeslaChassisLongitudinalSafety(TestTeslaLongitudinalSafety):\n TX_MSGS = [[0x488, 0], [0x45, 0], [0x45, 2], [0x2B9, 0]]\n 
RELAY_MALFUNCTION_ADDR = 0x488\n FWD_BLACKLISTED_ADDRS = {2: [0x2B9, 0x488]}\n\n def setUp(self):\n self.packer = CANPackerPanda(\"tesla_can\")\n self.safety = libpandasafety_py.libpandasafety\n self.safety.set_safety_hooks(Panda.SAFETY_TESLA, Panda.FLAG_TESLA_LONG_CONTROL)\n self.safety.init_tests()\n\nclass TestTeslaPTLongitudinalSafety(TestTeslaLongitudinalSafety):\n TX_MSGS = [[0x2BF, 0]]\n RELAY_MALFUNCTION_ADDR = 0x2BF\n FWD_BLACKLISTED_ADDRS = {2: [0x2BF]}\n\n def setUp(self):\n self.packer = CANPackerPanda(\"tesla_powertrain\")\n self.safety = libpandasafety_py.libpandasafety\n self.safety.set_safety_hooks(Panda.SAFETY_TESLA, Panda.FLAG_TESLA_LONG_CONTROL | Panda.FLAG_TESLA_POWERTRAIN)\n self.safety.init_tests()\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.arange", "numpy.interp", "numpy.isclose" ] ]
zidingz/datasets
[ "02edd9ebc79f715adb1c718d1439fda83dc356f1" ]
[ "src/datasets/search.py" ]
[ "import importlib.util\nimport os\nimport tempfile\nfrom pathlib import PurePath\nfrom typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union\n\nimport numpy as np\n\nfrom . import utils\nfrom .utils import logging\n\n\nif TYPE_CHECKING:\n from .arrow_dataset import Dataset # noqa: F401\n\n try:\n from elasticsearch import Elasticsearch # noqa: F401\n\n except ImportError:\n pass\n try:\n import faiss # noqa: F401\n\n except ImportError:\n pass\n\n_has_elasticsearch = importlib.util.find_spec(\"elasticsearch\") is not None\n_has_faiss = importlib.util.find_spec(\"faiss\") is not None\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass MissingIndex(Exception):\n pass\n\n\nSearchResults = NamedTuple(\"SearchResults\", [(\"scores\", List[float]), (\"indices\", List[int])])\nBatchedSearchResults = NamedTuple(\n \"BatchedSearchResults\", [(\"total_scores\", List[List[float]]), (\"total_indices\", List[List[int]])]\n)\n\nNearestExamplesResults = NamedTuple(\"NearestExamplesResults\", [(\"scores\", List[float]), (\"examples\", dict)])\nBatchedNearestExamplesResults = NamedTuple(\n \"BatchedNearestExamplesResults\", [(\"total_scores\", List[List[float]]), (\"total_examples\", List[dict])]\n)\n\n\nclass BaseIndex:\n \"\"\"Base class for indexing\"\"\"\n\n def search(self, query, k: int = 10) -> SearchResults:\n \"\"\"\n To implement.\n This method has to return the scores and the indices of the retrieved examples given a certain query.\n \"\"\"\n raise NotImplementedError\n\n def search_batch(self, queries, k: int = 10) -> BatchedSearchResults:\n \"\"\"Find the nearest examples indices to the query.\n\n Args:\n queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.\n k (`int`): The number of examples to retrieve per query.\n\n Ouput:\n total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_indices (`List[List[int]]`): The indices of the retrieved examples per query.\n \"\"\"\n total_scores, total_indices = [], []\n for query in queries:\n scores, indices = self.search(query, k)\n total_scores.append(scores)\n total_indices.append(indices)\n return BatchedSearchResults(total_scores, total_indices)\n\n def save(self, file: Union[str, PurePath]):\n \"\"\"Serialize the index on disk\"\"\"\n raise NotImplementedError\n\n @classmethod\n def load(cls, file: Union[str, PurePath]) -> \"BaseIndex\":\n \"\"\"Deserialize the index from disk\"\"\"\n raise NotImplementedError\n\n\nclass ElasticSearchIndex(BaseIndex):\n \"\"\"\n Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.\n An Elasticsearch server needs to be accessible, and a python client is declared with\n ```\n es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])\n ```\n for example.\n \"\"\"\n\n def __init__(\n self,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"Elasticsearch\"] = None,\n es_index_name: Optional[str] = None,\n es_index_config: Optional[dict] = None,\n ):\n assert (\n _has_elasticsearch\n ), \"You must install ElasticSearch to use ElasticSearchIndex. 
To do so you can run `pip install elasticsearch==7.7.1` for example\"\n assert es_client is None or (\n host is None and port is None\n ), \"Please specify either `es_client` or `(host, port)`, but not both.\"\n host = host or \"localhost\"\n port = port or 9200\n\n import elasticsearch.helpers # noqa: need this to properly load all the es features\n from elasticsearch import Elasticsearch # noqa: F811\n\n self.es_client = es_client if es_client is not None else Elasticsearch([{\"host\": host, \"port\": str(port)}])\n self.es_index_name = (\n es_index_name\n if es_index_name is not None\n else \"huggingface_datasets_\" + os.path.basename(tempfile.NamedTemporaryFile().name)\n )\n self.es_index_config = (\n es_index_config\n if es_index_config is not None\n else {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \"stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\"properties\": {\"text\": {\"type\": \"text\", \"analyzer\": \"standard\", \"similarity\": \"BM25\"}}},\n }\n )\n\n def add_documents(self, documents: Union[List[str], \"Dataset\"], column: Optional[str] = None):\n \"\"\"\n Add documents to the index.\n If the documents are inside a certain column, you can specify it using the `column` argument.\n \"\"\"\n index_name = self.es_index_name\n index_config = self.es_index_config\n self.es_client.indices.create(index=index_name, body=index_config)\n number_of_docs = len(documents)\n progress = utils.tqdm(\n unit=\"docs\", total=number_of_docs, disable=bool(logging.get_verbosity() == logging.NOTSET)\n )\n successes = 0\n\n def passage_generator():\n if column is not None:\n for i, example in enumerate(documents):\n yield {\"text\": example[column], \"_id\": i}\n else:\n for i, example in enumerate(documents):\n yield {\"text\": example, \"_id\": i}\n\n # create the ES index\n import elasticsearch as es\n\n for ok, action in es.helpers.streaming_bulk(\n client=self.es_client,\n index=index_name,\n actions=passage_generator(),\n ):\n progress.update(1)\n successes += ok\n if successes != len(documents):\n logger.warning(\n f\"Some documents failed to be added to ElasticSearch. 
Failures: {len(documents)-successes}/{len(documents)}\"\n )\n logger.info(\"Indexed %d documents\" % (successes,))\n\n def search(self, query: str, k=10) -> SearchResults:\n \"\"\"Find the nearest examples indices to the query.\n\n Args:\n query (`str`): The query as a string.\n k (`int`): The number of examples to retrieve.\n\n Output:\n scores (`List[float]`): The retrieval scores of the retrieved examples.\n indices (`List[int]`): The indices of the retrieved examples.\n \"\"\"\n response = self.es_client.search(\n index=self.es_index_name,\n body={\"query\": {\"multi_match\": {\"query\": query, \"fields\": [\"text\"], \"type\": \"cross_fields\"}}, \"size\": k},\n )\n hits = response[\"hits\"][\"hits\"]\n return SearchResults([hit[\"_score\"] for hit in hits], [int(hit[\"_id\"]) for hit in hits])\n\n def search_batch(self, queries, k: int = 10, max_workers=10) -> BatchedSearchResults:\n import concurrent.futures\n\n total_scores, total_indices = [None] * len(queries), [None] * len(queries)\n with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n future_to_index = {executor.submit(self.search, query, k): i for i, query in enumerate(queries)}\n for future in concurrent.futures.as_completed(future_to_index):\n index = future_to_index[future]\n results: SearchResults = future.result()\n total_scores[index] = results.scores\n total_indices[index] = results.indices\n return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)\n\n\nclass FaissIndex(BaseIndex):\n \"\"\"\n Dense index using Faiss. It is used to index vectors.\n Faiss is a library for efficient similarity search and clustering of dense vectors.\n It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.\n You can find more information about Faiss here:\n - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU\n \"\"\"\n\n def __init__(\n self,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None,\n ):\n \"\"\"\n Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n \"\"\"\n assert not (\n string_factory is not None and custom_index is not None\n ), \"Please specify either `string_factory` or `custom_index` but not both.\"\n self.device = device\n self.string_factory = string_factory\n self.metric_type = metric_type\n self.faiss_index = custom_index\n if not _has_faiss:\n raise ImportError(\n \"You must install Faiss to use FaissIndex. 
To do so you can run `pip install faiss-cpu` or `pip install faiss-gpu`\"\n )\n\n def add_vectors(\n self,\n vectors: Union[np.array, \"Dataset\"],\n column: Optional[str] = None,\n batch_size: int = 1000,\n train_size: Optional[int] = None,\n faiss_verbose: Optional[bool] = None,\n ):\n \"\"\"\n Add vectors to the index.\n If the arrays are inside a certain column, you can specify it using the `column` argument.\n \"\"\"\n import faiss # noqa: F811\n\n # Create index\n if self.faiss_index is None:\n size = len(vectors[0]) if column is None else len(vectors[0][column])\n if self.string_factory is not None:\n if self.metric_type is None:\n index = faiss.index_factory(size, self.string_factory)\n else:\n index = faiss.index_factory(size, self.string_factory, self.metric_type)\n else:\n if self.metric_type is None:\n index = faiss.IndexFlat(size)\n else:\n index = faiss.IndexFlat(size, self.metric_type)\n if self.device is not None and self.device > -1:\n self.faiss_res = faiss.StandardGpuResources()\n index = faiss.index_cpu_to_gpu(self.faiss_res, self.device, index)\n self.faiss_index = index\n logger.info(\"Created faiss index of type {}\".format(type(self.faiss_index)))\n\n # Set verbosity level\n if faiss_verbose is not None:\n self.faiss_index.verbose = faiss_verbose\n if hasattr(self.faiss_index, \"index\") and self.faiss_index.index is not None:\n self.faiss_index.index.verbose = faiss_verbose\n if hasattr(self.faiss_index, \"quantizer\") and self.faiss_index.quantizer is not None:\n self.faiss_index.quantizer.verbose = faiss_verbose\n if hasattr(self.faiss_index, \"clustering_index\") and self.faiss_index.clustering_index is not None:\n self.faiss_index.clustering_index.verbose = faiss_verbose\n\n # Train\n if train_size is not None:\n train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]\n logger.info(\"Training the index with the first {} vectors\".format(len(train_vecs)))\n self.faiss_index.train(train_vecs)\n else:\n logger.info(\"Ignored the training step of the faiss index as `train_size` is None.\")\n\n # Add vectors\n logger.info(\"Adding {} vectors to the faiss index\".format(len(vectors)))\n for i in utils.tqdm(\n range(0, len(vectors), batch_size), disable=bool(logging.get_verbosity() == logging.NOTSET)\n ):\n vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]\n self.faiss_index.add(vecs)\n\n def search(self, query: np.array, k=10) -> SearchResults:\n \"\"\"Find the nearest examples indices to the query.\n\n Args:\n query (`np.array`): The query as a numpy array.\n k (`int`): The number of examples to retrieve.\n\n Output:\n scores (`List[float]`): The retrieval scores of the retrieved examples.\n indices (`List[int]`): The indices of the retrieved examples.\n \"\"\"\n assert len(query.shape) == 1 or (len(query.shape) == 2 and query.shape[0] == 1)\n queries = query.reshape(1, -1)\n if not queries.flags.c_contiguous:\n queries = np.asarray(queries, order=\"C\")\n scores, indices = self.faiss_index.search(queries, k)\n return SearchResults(scores[0], indices[0].astype(int))\n\n def search_batch(self, queries: np.array, k=10) -> BatchedSearchResults:\n \"\"\"Find the nearest examples indices to the queries.\n\n Args:\n queries (`np.array`): The queries as a numpy array.\n k (`int`): The number of examples to retrieve.\n\n Output:\n total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.\n total_indices (`List[List[int]]`): The indices of the retrieved 
examples per query.\n \"\"\"\n assert len(queries.shape) == 2\n if not queries.flags.c_contiguous:\n queries = np.asarray(queries, order=\"C\")\n scores, indices = self.faiss_index.search(queries, k)\n return BatchedSearchResults(scores, indices.astype(int))\n\n def save(self, file: Union[str, PurePath]):\n \"\"\"Serialize the FaissIndex on disk\"\"\"\n import faiss # noqa: F811\n\n if self.device is not None and self.device > -1:\n index = faiss.index_gpu_to_cpu(self.faiss_index)\n else:\n index = self.faiss_index\n\n faiss.write_index(index, str(file))\n\n @classmethod\n def load(\n cls,\n file: Union[str, PurePath],\n device: Optional[int] = None,\n ) -> \"FaissIndex\":\n \"\"\"Deserialize the FaissIndex from disk\"\"\"\n import faiss # noqa: F811\n\n faiss_index = cls(device=device)\n index = faiss.read_index(str(file))\n if faiss_index.device is not None and faiss_index.device > -1:\n faiss_index.faiss_res = faiss.StandardGpuResources()\n index = faiss.index_cpu_to_gpu(faiss_index.faiss_res, faiss_index.device, index)\n faiss_index.faiss_index = index\n return faiss_index\n\n\nclass IndexableMixin:\n \"\"\"Add indexing features to `datasets.Dataset`\"\"\"\n\n def __init__(self):\n self._indexes: Dict[str, BaseIndex] = {}\n\n def __len__(self):\n raise NotImplementedError\n\n def __getitem__(self, key):\n raise NotImplementedError\n\n def is_index_initialized(self, index_name: str) -> bool:\n return index_name in self._indexes\n\n def _check_index_is_initialized(self, index_name: str):\n if not self.is_index_initialized(index_name):\n raise MissingIndex(\n f\"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first.\"\n )\n\n def list_indexes(self) -> List[str]:\n \"\"\"List the names/identifiers of all the attached indexes.\"\"\"\n return list(self._indexes)\n\n def get_index(self, index_name: str) -> BaseIndex:\n \"\"\"Get the index with the specified name/identifier.\n\n Args:\n index_name (:obj:`str`): Index name.\n\n Returns:\n :class:`BaseIndex`\n \"\"\"\n self._check_index_is_initialized(index_name)\n return self._indexes[index_name]\n\n def add_faiss_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None,\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n The index is created using the vectors of the specified column.\n You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n\n Args:\n column (:obj:`str`): The column of the vectors to add to the index.\n index_name (Optional :obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.\n By default it corresponds to `column`.\n device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU.\n string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.\n metric_type (Optional :obj:`int`): Type of metric. 
Ex: faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index.\n \"\"\"\n index_name = index_name if index_name is not None else column\n faiss_index = FaissIndex(\n device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index\n )\n faiss_index.add_vectors(self, column=column, train_size=train_size, faiss_verbose=faiss_verbose)\n self._indexes[index_name] = faiss_index\n\n def add_faiss_index_from_external_arrays(\n self,\n external_arrays: np.array,\n index_name: str,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None,\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n The index is created using the vectors of `external_arrays`.\n You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory\n\n Args:\n external_arrays (:obj:`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.\n It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.\n index_name (:obj:`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.\n device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU.\n string_factory (Optional :obj:`str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.\n metric_type (Optional :obj:`int`): Type of metric. Ex: faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`): If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False): Enable the verbosity of the Faiss index.\n \"\"\"\n faiss_index = FaissIndex(\n device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index\n )\n faiss_index.add_vectors(external_arrays, column=None, train_size=train_size, faiss_verbose=faiss_verbose)\n self._indexes[index_name] = faiss_index\n\n def save_faiss_index(self, index_name: str, file: Union[str, PurePath]):\n \"\"\"Save a FaissIndex on disk.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index. 
This is the index_name that is used to call `.get_nearest` or `.search`.\n file (:obj:`str`): The path to the serialized faiss index on disk.\n \"\"\"\n index = self.get_index(index_name)\n if not isinstance(index, FaissIndex):\n raise ValueError(\"Index '{}' is not a FaissIndex but a '{}'\".format(index_name, type(index)))\n index.save(file)\n logger.info(\"Saved FaissIndex {} at {}\".format(index_name, file))\n\n def load_faiss_index(\n self,\n index_name: str,\n file: Union[str, PurePath],\n device: Optional[int] = None,\n ):\n \"\"\"Load a FaissIndex from disk.\n\n If you want to do additional configurations, you can have access to the faiss index object by doing\n `.get_index(index_name).faiss_index` to make it fit your needs.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index. This is the index_name that is used to\n call `.get_nearest` or `.search`.\n file (:obj:`str`): The path to the serialized faiss index on disk.\n device (Optional :obj:`int`): If not None, this is the index of the GPU to use. By default it uses the CPU.\n \"\"\"\n index = FaissIndex.load(file, device=device)\n assert index.faiss_index.ntotal == len(\n self\n ), \"Index size should match Dataset size, but Index '{}' at {} has {} elements while the dataset has {} examples.\".format(\n index_name, file, index.faiss_index.ntotal, len(self)\n )\n self._indexes[index_name] = index\n logger.info(\"Loaded FaissIndex {} from {}\".format(index_name, file))\n\n def add_elasticsearch_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"Elasticsearch\"] = None,\n es_index_name: Optional[str] = None,\n es_index_config: Optional[dict] = None,\n ):\n \"\"\"Add a text index using ElasticSearch for fast retrieval.\n\n Args:\n column (:obj:`str`): The column of the documents to add to the index.\n index_name (Optional :obj:`str`): The index_name/identifier of the index. 
This is the index name that is used to call `.get_nearest` or `.search`.\n By default it corresponds to `column`.\n host (Optional :obj:`str`, defaults to localhost):\n host of where ElasticSearch is running\n port (Optional :obj:`int`, defaults to 9200):\n port of where ElasticSearch is running\n es_client (Optional :obj:`elasticsearch.Elasticsearch`):\n The elasticsearch client used to create the index if host and port are None.\n es_index_name (Optional :obj:`str`): The elasticsearch index name used to create the index.\n es_index_config (Optional :obj:`dict`):\n The configuration of the elasticsearch index.\n Default config is:\n\n Config::\n\n {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \"stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"analyzer\": \"standard\",\n \"similarity\": \"BM25\"\n },\n }\n },\n }\n \"\"\"\n index_name = index_name if index_name is not None else column\n es_index = ElasticSearchIndex(\n host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config\n )\n es_index.add_documents(self, column=column)\n self._indexes[index_name] = es_index\n\n def load_elasticsearch_index(\n self,\n index_name: str,\n es_index_name: str,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"Elasticsearch\"] = None,\n es_index_config: Optional[dict] = None,\n ):\n \"\"\"Load an existing text index using ElasticSearch for fast retrieval.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.\n es_index_name (:obj:`str`): The name of elasticsearch index to load.\n host (Optional :obj:`str`, defaults to localhost):\n host of where ElasticSearch is running\n port (Optional :obj:`int`, defaults to 9200):\n port of where ElasticSearch is running\n es_client (Optional :obj:`elasticsearch.Elasticsearch`):\n The elasticsearch client used to create the index if host and port are None.\n es_index_config (Optional :obj:`dict`):\n The configuration of the elasticsearch index.\n Default config is::\n\n {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \"stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"analyzer\": \"standard\",\n \"similarity\": \"BM25\"\n },\n }\n },\n }\n \"\"\"\n self._indexes[index_name] = ElasticSearchIndex(\n host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config\n )\n\n def drop_index(self, index_name: str):\n \"\"\"Drop the index with the specified name.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n \"\"\"\n del self._indexes[index_name]\n\n def search(self, index_name: str, query: Union[str, np.array], k: int = 10) -> SearchResults:\n \"\"\"Find the nearest examples indices in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The name/identifier of the index.\n query (:obj:`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve.\n\n Returns:\n scores (:obj:`List[float]`): The retrieval scores of the retrieved examples.\n indices (:obj:`List[int]`): The indices of the retrieved examples.\n 
\"\"\"\n self._check_index_is_initialized(index_name)\n return self._indexes[index_name].search(query, k)\n\n def search_batch(self, index_name: str, queries: Union[List[str], np.array], k: int = 10) -> BatchedSearchResults:\n \"\"\"Find the nearest examples indices in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n queries (:obj:`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve per query.\n\n Returns:\n total_scores (:obj:`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_indices (:obj:`List[List[int]]`): The indices of the retrieved examples per query.\n \"\"\"\n self._check_index_is_initialized(index_name)\n return self._indexes[index_name].search_batch(queries, k)\n\n def get_nearest_examples(\n self, index_name: str, query: Union[str, np.array], k: int = 10\n ) -> NearestExamplesResults:\n \"\"\"Find the nearest examples in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n query (:obj:`Union[str, np.ndarray]`): The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve.\n\n Returns:\n scores (:obj:`List[float]`): The retrieval scores of the retrieved examples.\n examples (:obj:`dict`): The retrieved examples.\n \"\"\"\n self._check_index_is_initialized(index_name)\n scores, indices = self.search(index_name, query, k)\n return NearestExamplesResults(scores, self[[i for i in indices if i >= 0]])\n\n def get_nearest_examples_batch(\n self, index_name: str, queries: Union[List[str], np.array], k: int = 10\n ) -> BatchedNearestExamplesResults:\n \"\"\"Find the nearest examples in the dataset to the query.\n\n Args:\n index_name (:obj:`str`): The index_name/identifier of the index.\n queries (:obj:`Union[List[str], np.ndarray]`): The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.\n k (:obj:`int`): The number of examples to retrieve per query.\n\n Returns:\n total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query.\n total_examples (`List[dict]`): The retrieved examples per query.\n \"\"\"\n self._check_index_is_initialized(index_name)\n total_scores, total_indices = self.search_batch(index_name, queries, k)\n return BatchedNearestExamplesResults(\n total_scores, [self[[i for i in indices if i >= 0]] for indices in total_indices]\n )\n" ]
[ [ "numpy.asarray" ] ]
xandaschofield/bptf
[ "de8ac77a89fadb01aceb26d14b4366c8afab2b87" ]
[ "code/utils.py" ]
[ "import numpy as np\nimport numpy.random as rn\nimport sktensor as skt\n\nimport pickle\nfrom pathlib import Path as path\nfrom time import sleep\n\n\ndef is_binary(X):\n \"\"\"Checks whether input is a binary integer tensor.\"\"\"\n if np.issubdtype(X.dtype, int):\n if isinstance(X, skt.sptensor):\n return (X.vals == 1).all()\n else:\n return (X <= 1).all() and (X >= 0).all()\n else:\n return False\n\n\ndef is_sparse(X):\n \"\"\"Checks whether input tensor is sparse.\n\n Implements a heuristic definition of sparsity.\n\n A tensor is considered sparse if:\n\n M = number of modes\n S = number of entries\n I = number of non-zero entries\n\n N > M(I + 1)\n \"\"\"\n M = X.ndim\n S = X.size\n I = X.nonzero()[0].size\n return S > (I + 1) * M\n\n\ndef sptensor_from_dense_array(X):\n \"\"\"Creates an sptensor from an ndarray or dtensor.\"\"\"\n subs = X.nonzero()\n vals = X[subs]\n return skt.sptensor(subs, vals, shape=X.shape, dtype=X.dtype)\n\n\ndef preprocess(X):\n \"\"\"Preprocesses input dense data tensor.\n\n If data is sparse, returns an int sptensor.\n Otherwise, returns an int dtensor.\n \"\"\"\n if isinstance(X, skt.sptensor):\n if not np.issubdtype(X.dtype, int):\n X.vals = X.vals.astype(int)\n X.dtype = int\n return X\n else:\n if not np.issubdtype(X.dtype, int):\n X = X.astype(int)\n if is_sparse(X):\n return sptensor_from_dense_array(X)\n else:\n if not isinstance(X, skt.dtensor):\n return skt.dtensor(X)\n\n\ndef uttkrp(X, m, U):\n \"\"\"\n Alternative implementation of uttkrp in sktensor library.\n\n The description of that method is copied below:\n\n Unfolded tensor times Khatri-Rao product:\n :math:`Z = \\\\unfold{X}{3} (U_1 \\kr \\cdots \\kr U_N)`\n Computes the _matrix_ product of the unfolding\n of a tensor and the Khatri-Rao product of multiple matrices.\n Efficient computations are perfomed by the respective\n tensor implementations.\n Parameters\n ----------\n U : list of array-likes\n Matrices for which the Khatri-Rao product is computed and\n which are multiplied with the tensor in mode `mode`.\n m : int\n Mode in which the Khatri-Rao product of `U` is multiplied\n with the tensor.\n Returns\n -------\n Z : np.ndarray\n Matrix which is the result of the matrix product of the unfolding of\n the tensor and the Khatri-Rao product of `U`.\n See also\n --------\n For efficient computations of unfolded tensor times Khatri-Rao products\n for specialiized tensors see also\n dtensor.uttkrp, sptensor.uttkrp, ktensor.uttkrp, ttensor.uttkrp\n References\n ----------\n [1] B.W. Bader, T.G. Kolda\n Efficient Matlab Computations With Sparse and Factored Tensors\n SIAM J. Sci. Comput, Vol 30, No. 1, pp. 
205--231, 2007\n \"\"\"\n D, K = U[m].shape\n Z = np.rollaxis(X, m, 0)\n order = list(range(X.ndim))\n order.remove(m)\n Z = np.dot(Z, U[order[-1]])\n for mode in reversed(order[:-1]):\n Z *= U[mode]\n Z = Z.sum(axis=-2)\n return Z\n\n\ndef sp_uttkrp(vals, subs, m, U):\n \"\"\"Alternative implementation of the sparse version of the uttkrp.\n ----------\n subs : n-tuple of array-likes\n Subscripts of the nonzero entries in the tensor.\n Length of tuple n must be equal to dimension of tensor.\n vals : array-like\n Values of the nonzero entries in the tensor.\n U : list of array-likes\n Matrices for which the Khatri-Rao product is computed and\n which are multiplied with the tensor in mode `mode`.\n m : int\n Mode in which the Khatri-Rao product of `U` is multiplied\n with the tensor.\n Returns\n -------\n out : np.ndarray\n Matrix which is the result of the matrix product of the unfolding of\n the tensor and the Khatri-Rao product of `U`.\n \"\"\"\n D, K = U[m].shape\n out = np.zeros_like(U[m])\n for k in range(K):\n tmp = vals.copy()\n for mode, matrix in enumerate(U):\n if mode == m:\n continue\n tmp *= matrix[subs[mode], k]\n out[:, k] += np.bincount(subs[m],\n weights=tmp,\n minlength=D)\n return out\n\n\ndef parafac(matrices, axis=None):\n \"\"\"Computes the PARAFAC of a set of matrices.\n\n For a set of N matrices,\n\n U = {U1, U2, ..., UN}\n\n where Ui is size (Di X K),\n\n PARAFAC(U) is defined as the sum of column-wise outer-products:\n\n PARAFAC(U) = \\sum_k U1(:, k) \\circ \\dots \\circ UN(:, k)\n\n and results in a tensor of size D1 x ... x DN.\n\n Calls np.einsum repeatedly (instead of all at once) to avoid memory\n usage problems that occur when too many matrices are passed to einsum.\n\n Parameters\n ----------\n matrices : list of array-likes\n Matrices for which the PARAFAC is computed\n axis : int, optional\n The axis along which all matrices have the same dimensionality.\n Either 0 or 1. If set to None, it will check which axis\n all matrices agree on. 
If matrices are square, it defaults to 1.\n Returns\n -------\n out : np.ndarray\n ndarray which is the result of the PARAFAC\n \"\"\"\n assert len(matrices) > 1\n if axis is None:\n N, M = matrices[0].shape\n axis_0_all_equal = all([X.shape[0] == N for X in matrices[1:]])\n axis_1_all_equal = all([X.shape[1] == M for X in matrices[1:]])\n if axis_1_all_equal:\n axis = 1\n elif axis_0_all_equal:\n axis = 0\n else:\n raise ValueError('Matrices not aligned.')\n\n if len(matrices) == 2:\n s = 'za,zb->ab' if axis == 0 else 'az,bz->ab'\n return np.einsum(s, matrices[0], matrices[1])\n else:\n s = 'za,zb->zab' if axis == 0 else 'az,bz->abz'\n tmp = np.einsum(s, matrices[0], matrices[1])\n curr = 'ab'\n\n letters = list('cdefghijklmnopqrstuv')\n for matrix in matrices[2:-1]:\n ltr = letters.pop(0)\n if axis == 0:\n s = 'z%s,z%s->z%s%s' % (curr, ltr, curr, ltr)\n else:\n s = '%sz,%sz->%s%sz' % (curr, ltr, curr, ltr)\n tmp = np.einsum(s, tmp, matrix)\n curr += ltr\n\n ltr = letters.pop(0)\n if axis == 0:\n s = 'z%s,z%s->%s%s' % (curr, ltr, curr, ltr)\n else:\n s = '%sz,%sz->%s%s' % (curr, ltr, curr, ltr)\n return np.einsum(s, tmp, matrices[-1])\n\n\ndef serialize_bptf(model, out_dir, num=None, desc=None):\n if desc is None:\n desc = 'model'\n out_dir = path(out_dir)\n assert out_dir.exists()\n\n if num is None:\n sleep(rn.random() * 5)\n curr_files = out_dir.glob('*_%s.npz' % desc)\n curr_nums = [int(f.stem.split('_')[0]) for f in curr_files]\n num = max(curr_nums + [0]) + 1\n\n with open(out_dir.joinpath('%d_%s.dat' % (num, desc)), 'wb') as f:\n pickle.dump(model.get_params(), f)\n\n out_path = out_dir.joinpath('%d_%s.npz' % (num, desc))\n np.savez(out_path,\n E_DK_M=model.E_DK_M,\n G_DK_M=model.G_DK_M,\n gamma_DK_M=model.gamma_DK_M,\n delta_DK_M=model.delta_DK_M,\n beta_M=model.beta_M)\n print(out_path)\n return num\n" ]
[ [ "numpy.rollaxis", "numpy.dot", "numpy.savez", "numpy.random.random", "numpy.einsum", "numpy.issubdtype", "numpy.zeros_like", "numpy.bincount" ] ]
saifrahmed/tfx-local
[ "3b0374706dc5189d712e5120218f66ab44bea821" ]
[ "tfx/components/trainer/executor.py" ]
[ "# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Generic TFX trainer executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport tensorflow_model_analysis as tfma\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Text\n\nfrom tensorflow_metadata.proto.v0 import schema_pb2\nfrom tfx.components.base import base_executor\nfrom tfx.orchestration.gcp import cmle_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.utils import io_utils\nfrom tfx.utils import path_utils\nfrom tfx.utils import types\nfrom google.protobuf import json_format\n\n\ndef _all_files_pattern(file_pattern):\n return '{}*'.format(file_pattern)\n\n\nclass Executor(base_executor.BaseExecutor):\n \"\"\"Generic TFX trainer executor.\"\"\"\n\n _CHECKPOINT_FILE_NAME = 'checkpoint'\n\n def Do(self, input_dict,\n output_dict,\n exec_properties):\n \"\"\"Runs trainer job the given input.\n\n Args:\n input_dict: Input dict from input key to a list of Artifacts.\n - transformed_examples: Transformed example.\n - transform_output: Input transform graph.\n - schema: Schema of the data.\n output_dict: Output dict from output key to a list of Artifacts.\n - output: Exported model.\n exec_properties: A dict of execution properties.\n - train_args: JSON string of trainer_pb2.TrainArgs instance, providing\n args for training.\n - eval_args: JSON string of trainer_pb2.EvalArgs instance, providing\n args for eval.\n - module_file: Python module file containing UDF model definition.\n - warm_starting: Whether or not we need to do warm starting.\n - warm_start_from: Optional. 
If warm_starting is True, this is the\n directory to find previous model to warm start on.\n\n Returns:\n None\n \"\"\"\n self._log_startup(input_dict, output_dict, exec_properties)\n\n # TODO(khaas): Move this to tfx/extensions.\n if exec_properties.get('custom_config', None):\n cmle_args = exec_properties.get('custom_config',\n {}).get('cmle_training_args')\n if cmle_args:\n executor_class_path = '.'.join([Executor.__module__, Executor.__name__])\n return cmle_runner.start_cmle_training(input_dict, output_dict,\n exec_properties,\n executor_class_path, cmle_args)\n\n trainer_fn = io_utils.import_func(exec_properties['module_file'],\n 'trainer_fn')\n\n # Set up training parameters\n train_files = [\n _all_files_pattern(\n types.get_split_uri(input_dict['transformed_examples'], 'train'))\n ]\n transform_output = types.get_single_uri(input_dict['transform_output'])\n eval_files = _all_files_pattern(\n types.get_split_uri(input_dict['transformed_examples'], 'eval'))\n schema_file = io_utils.get_only_uri_in_dir(\n types.get_single_uri(input_dict['schema']))\n\n train_args = trainer_pb2.TrainArgs()\n eval_args = trainer_pb2.EvalArgs()\n json_format.Parse(exec_properties['train_args'], train_args)\n json_format.Parse(exec_properties['eval_args'], eval_args)\n\n # https://github.com/tensorflow/tfx/issues/45: Replace num_steps=0 with\n # num_steps=None. Conversion of the proto to python will set the default\n # value of an int as 0 so modify the value here. Tensorflow will raise an\n # error if num_steps <= 0.\n train_steps = train_args.num_steps or None\n eval_steps = eval_args.num_steps or None\n\n output_path = types.get_single_uri(output_dict['output'])\n serving_model_dir = path_utils.serving_model_dir(output_path)\n eval_model_dir = path_utils.eval_model_dir(output_path)\n\n # Assemble warm start path if needed.\n warm_start_from = None\n if exec_properties.get('warm_starting') and exec_properties.get(\n 'warm_start_from'):\n previous_model_dir = os.path.join(exec_properties['warm_start_from'],\n path_utils.SERVING_MODEL_DIR)\n if previous_model_dir and tf.gfile.Exists(\n os.path.join(previous_model_dir, self._CHECKPOINT_FILE_NAME)):\n warm_start_from = previous_model_dir\n\n # TODO(b/126242806) Use PipelineInputs when it is available in third_party.\n hparams = tf.contrib.training.HParams(\n train_files=train_files,\n transform_output=transform_output,\n output_dir=output_path,\n serving_model_dir=serving_model_dir,\n eval_files=eval_files,\n schema_file=schema_file,\n train_steps=train_steps,\n eval_steps=eval_steps,\n warm_start_from=warm_start_from)\n\n schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())\n\n training_spec = trainer_fn(hparams, schema)\n\n # Train the model\n tf.logging.info('Training model.')\n tf.estimator.train_and_evaluate(training_spec['estimator'],\n training_spec['train_spec'],\n training_spec['eval_spec'])\n tf.logging.info('Training complete. Model written to %s',\n serving_model_dir)\n\n # Export an eval savedmodel for TFMA\n tf.logging.info('Exporting eval_savedmodel for TFMA.')\n tfma.export.export_eval_savedmodel(\n estimator=training_spec['estimator'],\n export_dir_base=eval_model_dir,\n eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])\n\n tf.logging.info('Exported eval_savedmodel to %s.', eval_model_dir)\n" ]
[ [ "tensorflow.estimator.train_and_evaluate", "tensorflow.logging.info", "tensorflow.contrib.training.HParams" ] ]
TheShadow29/btp-cv
[ "e468a1baf36364096fb7cb4ff854a745298cec12" ]
[ "code/cnn_ecg/lib/grid_graph.py" ]
[ "import sklearn\nimport sklearn.metrics\nimport scipy.sparse, scipy.sparse.linalg # scipy.spatial.distance\nimport numpy as np\n\n\ndef grid_graph(grid_side,number_edges,metric):\n \"\"\"Generate graph of a grid\"\"\"\n z = grid(grid_side)\n dist, idx = distance_sklearn_metrics(z, k=number_edges, metric=metric)\n A = adjacency(dist, idx)\n print(\"nb edges: \",A.nnz)\n return A\n\n\ndef grid(m, dtype=np.float32):\n \"\"\"Return coordinates of grid points\"\"\"\n M = m**2\n x = np.linspace(0,1,m, dtype=dtype)\n y = np.linspace(0,1,m, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M,2), dtype)\n z[:,0] = xx.reshape(M)\n z[:,1] = yy.reshape(M)\n return z\n\n\ndef distance_sklearn_metrics(z, k=4, metric='euclidean'):\n \"\"\"Compute pairwise distances\"\"\"\n #d = sklearn.metrics.pairwise.pairwise_distances(z, metric=metric, n_jobs=-2)\n d = sklearn.metrics.pairwise.pairwise_distances(z, metric=metric, n_jobs=1)\n # k-NN\n idx = np.argsort(d)[:,1:k+1]\n d.sort()\n d = d[:,1:k+1]\n return d, idx\n\n\ndef adjacency(dist, idx):\n \"\"\"Return adjacency matrix of a kNN graph\"\"\"\n M, k = dist.shape\n assert M, k == idx.shape\n assert dist.min() >= 0\n assert dist.max() <= 1\n\n # Pairwise distances\n sigma2 = np.mean(dist[:,-1])**2\n dist = np.exp(- dist**2 / sigma2)\n\n # Weight matrix\n I = np.arange(0, M).repeat(k)\n J = idx.reshape(M*k)\n V = dist.reshape(M*k)\n W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M))\n\n # No self-connections\n W.setdiag(0)\n\n # Undirected graph\n bigger = W.T > W\n W = W - W.multiply(bigger) + W.T.multiply(bigger)\n\n assert W.nnz % 2 == 0\n assert np.abs(W - W.T).mean() < 1e-10\n assert type(W) is scipy.sparse.csr.csr_matrix\n return W\n" ]
[ [ "numpy.abs", "numpy.meshgrid", "numpy.linspace", "numpy.arange", "numpy.mean", "numpy.argsort", "numpy.exp", "sklearn.metrics.pairwise.pairwise_distances", "numpy.empty" ] ]
zaza0209/abess
[ "42e5b7180383c450a3355219f3dd8cc8567d4a00" ]
[ "python/abess/bess_base.py" ]
[ "import numbers\nimport numpy as np\nfrom scipy.sparse import coo_matrix\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\nfrom sklearn.base import BaseEstimator\nfrom .cabess import *\n\n\nclass bess_base(BaseEstimator):\n \"\"\"\n Parameters\n ----------\n max_iter : int, optional\n Maximum number of iterations taken for the splicing algorithm to converge.\n Due to the limitation of loss reduction, the splicing algorithm must be able to converge.\n The number of iterations is only to simplify the implementation.\n Default: max_iter = 20.\n is_warm_start : bool, optional\n When tuning the optimal parameter combination, whether to use the last solution as a warm start to accelerate the iterative convergence of the splicing algorithm.\n Default:is_warm_start = True.\n path_type : {\"seq\", \"gs\"}\n The method to be used to select the optimal support size.\n For path_type = \"seq\", we solve the best subset selection problem for each size in support_size.\n For path_type = \"gs\", we solve the best subset selection problem with support size ranged in (s_min, s_max), where the specific support size to be considered is determined by golden section.\n support_size : array_like, optional\n An integer vector representing the alternative support sizes. Only used for path_type = \"seq\".\n Default is 0:min(n, round(n/(log(log(n))log(p)))).\n s_min : int, optional\n The lower bound of golden-section-search for sparsity searching.\n Default: s_min = 1.\n s_max : int, optional\n The higher bound of golden-section-search for sparsity searching.\n Default: s_max = min(n, round(n/(log(log(n))log(p)))).\n ic_type : {'aic', 'bic', 'gic', 'ebic'}, optional\n The type of criterion for choosing the support size. Available options are \"gic\", \"ebic\", \"bic\", \"aic\".\n Default: ic_type = 'ebic'.\n cv : int, optional\n The folds number when Use the Cross-validation method. If cv=1, cross-validation would not be used.\n Default: cv = 1.\n thread: int, optional\n Max number of multithreads. If thread = 0, the program will use the maximum number supported by the device.\n Default: thread = 1.\n screening_size: int, optional\n The number of variables remaining after screening.\n It should be a non-negative number smaller than p, but larger than any value in support\\\\_size.\n If screening_size=-1, screening will not be used.\n If screening_size=0, screening_size will be set as min(p, int(n / (np.log(np.log(n)) * np.log(p)))).\n Default: screening_size = -1.\n always_select: array_like, optional\n An array contains the indexes of variables we want to consider in the model.\n Default: always_select = [].\n primary_model_fit_max_iter: int, optional\n The maximal number of iteration in `primary_model_fit()` (in Algorithm.h).\n Default: primary_model_fit_max_iter = 10.\n primary_model_fit_epsilon: double, optional\n The epsilon (threshold) of iteration in `primary_model_fit()` (in Algorithm.h).\n Default: primary_model_fit_max_iter = 1e-08.\n\n Returns\n -------\n coef_: array of shape (n_features, ) or (n_targets, n_features)\n Estimated coefficients for the best subset selection problem.\n ic_: double\n The score under chosen information criterion.\n\n References\n ----------\n - Junxian Zhu, Canhong Wen, Jin Zhu, Heping Zhang, and Xueqin Wang. A polynomial algorithm for best-subset selection problem. 
Proceedings of the National Academy of Sciences, 117(52):33117-33123, 2020.\n\n\n \"\"\"\n\n def __init__(self, algorithm_type, model_type, normalize_type, path_type, max_iter=20, exchange_num=5, is_warm_start=True,\n support_size=None, alpha=None, s_min=None, s_max=None,\n ic_type=\"ebic\", ic_coef=1.0,\n cv=1, screening_size=-1,\n always_select=None,\n primary_model_fit_max_iter=10, primary_model_fit_epsilon=1e-8,\n approximate_Newton=False,\n thread=1,\n covariance_update=False,\n sparse_matrix=False,\n splicing_type=0,\n important_search=0,\n # early_stop=False, lambda_min=None, lambda_max=None,\n # n_lambda=100,\n ):\n self.algorithm_type = algorithm_type\n self.model_type = model_type\n self.normalize_type = normalize_type\n self.path_type = path_type\n self.max_iter = max_iter\n self.exchange_num = exchange_num\n self.is_warm_start = is_warm_start\n self.support_size = support_size\n self.alpha = alpha\n self.n_features_in_ = 0\n self.s_min = s_min\n self.s_max = s_max\n self.lambda_min = None\n self.lambda_max = None\n self.n_lambda = 100\n self.ic_type = ic_type\n self.ic_coef = ic_coef\n self.cv = cv\n self.screening_size = screening_size\n self.always_select = always_select\n self.primary_model_fit_max_iter = primary_model_fit_max_iter\n self.primary_model_fit_epsilon = primary_model_fit_epsilon\n self.early_stop = False\n self.approximate_Newton = approximate_Newton\n self.thread = thread\n self.covariance_update = covariance_update\n self.sparse_matrix = sparse_matrix\n self.splicing_type = splicing_type\n self.important_search = important_search\n # output\n self.coef_ = None\n self.intercept_ = None\n self.train_loss_ = 0\n self.ic_ = 0\n\n def new_data_check(self, X, y=None, weights=None):\n # Check1 : whether fit had been called\n check_is_fitted(self)\n\n # Check2 : X validation\n X = check_array(X, accept_sparse=True)\n if X.shape[1] != self.n_features_in_:\n raise ValueError(\"X.shape[1] should be \" +\n str(self.n_features_in_))\n\n # Check3 : X, y validation\n if (y is not None) and (weights is None):\n X, y = check_X_y(X, y, accept_sparse=True,\n multi_output=True, y_numeric=True)\n return X, y\n\n # Check4: X, y, weights validation\n if weights is not None:\n X, y = check_X_y(X, y, accept_sparse=True,\n multi_output=True, y_numeric=True)\n weights = np.array(weights, dtype=float)\n\n if len(weights.shape) != 1:\n raise ValueError(\"weights should be 1-dimension.\")\n if weights.shape[0] != X.shape[0]:\n raise ValueError(\"weights should have a length of X.shape[0].\")\n return X, y, weights\n\n return X\n\n def fit(self, X=None, y=None, is_normal=True,\n weight=None, group=None, cv_fold_id=None, A_init=None):\n \"\"\"\n The fit function is used to transfer the information of data and return the fit result.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, p_features)\n Training data\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values. Will be cast to X's dtype if necessary.\n For the linear regression problem, y should be an $n \\\\times 1$ numpy array with type \\\\code{double}.\n For classification problems, \\\\code{y} should be a $n \\\\times 1$ numpy array with values \\\\code{0} or \\\\code{1}.\n For count data, \\\\code{y} should be a $n \\\\times 1$ numpy array of non-negative integers.\n is_normal : bool, optional\n whether normalize the variables array before fitting the algorithm.\n Default: is_normal=True.\n weight : array-like of shape (n_samples,)\n Individual weights for each sample. 
Only used for is_weight=True.\n Default: weight = 1 for each observation.\n group : array_like, optional\n The group index for each variable.\n Default: group = \\\\code{numpy.ones(p)}.\n cv_fold_id : array_like of shape (n_samples,), optional\n An array indicating the fold of each sample in CV. Samples in the same fold should be given the same number.\n Default: cv_fold_id=None\n \"\"\"\n\n # print(\"fit enter.\")#///\n\n # Input check & init:\n if isinstance(X, (list, np.ndarray, np.matrix, coo_matrix)):\n if isinstance(X, coo_matrix):\n self.sparse_matrix = True\n\n # Check that X and y have correct shape\n X, y = check_X_y(X, y, accept_sparse=True,\n multi_output=True, y_numeric=True, dtype='numeric')\n\n # Sort for Cox\n if self.model_type == \"Cox\":\n X = X[y[:, 0].argsort()]\n y = y[y[:, 0].argsort()]\n y = y[:, 1].reshape(-1)\n\n # Init\n n = X.shape[0]\n p = X.shape[1]\n self.n_features_in_ = p\n\n if y.ndim == 1:\n M = 1\n y = y.reshape(len(y), 1)\n else:\n M = y.shape[1]\n else:\n raise ValueError(\"X should be a matrix or sparse matrix.\")\n\n # Algorithm_type: abess\n if self.algorithm_type == \"abess\":\n algorithm_type_int = 6\n else:\n raise ValueError(\"algorithm_type should not be \" +\n str(self.algorithm_type))\n\n # Model_type: lm, logit, poiss, cox, multi-gaussian, multinomial\n if self.model_type == \"Lm\":\n model_type_int = 1\n elif self.model_type == \"Logistic\":\n model_type_int = 2\n elif self.model_type == \"Poisson\":\n model_type_int = 3\n elif self.model_type == \"Cox\":\n model_type_int = 4\n elif self.model_type == \"Multigaussian\":\n model_type_int = 5\n elif self.model_type == \"Multinomial\":\n model_type_int = 6\n elif self.model_type == 'Gamma':\n model_type_int = 8\n else:\n raise ValueError(\"model_type should not be \" +\n str(self.model_type))\n\n # Path_type: seq, gs\n if self.path_type == \"seq\":\n path_type_int = 1\n elif self.path_type == \"gs\":\n path_type_int = 2\n else:\n raise ValueError(\"path_type should be \\'seq\\' or \\'gs\\'\")\n\n # Ic_type: aic, bic, gic, ebic\n if self.ic_type == \"aic\":\n ic_type_int = 1\n elif self.ic_type == \"bic\":\n ic_type_int = 2\n elif self.ic_type == \"gic\":\n ic_type_int = 3\n elif self.ic_type == \"ebic\":\n ic_type_int = 4\n else:\n raise ValueError(\n \"ic_type should be \\\"aic\\\", \\\"bic\\\", \\\"ebic\\\" or \\\"gic\\\"\")\n\n # cv\n if (not isinstance(self.cv, int) or self.cv <= 0):\n raise ValueError(\"cv should be a positive integer.\")\n if self.cv > n:\n raise ValueError(\"cv should be smaller than n.\")\n\n # cv_fold_id\n if cv_fold_id is None:\n cv_fold_id = np.array([], dtype=\"int32\")\n else:\n cv_fold_id = np.array(cv_fold_id, dtype=\"int32\")\n if cv_fold_id.ndim > 1:\n raise ValueError(\"cv_fold_id should be a 1D array of integers.\")\n if cv_fold_id.size != n:\n raise ValueError(\n \"The length of cv_fold_id should be equal to X.shape[0].\")\n if len(set(cv_fold_id)) != self.cv:\n raise ValueError(\n \"The number of different folds should be equal to `cv`.\")\n\n # A_init\n if A_init is None:\n A_init = np.array([], dtype=\"int32\")\n else:\n A_init = np.array(A_init, dtype=\"int32\")\n if A_init.ndim > 1:\n raise ValueError(\n \"The initial active set should be a 1D array of integers.\")\n if (A_init.min() < 0 or A_init.max() >= p):\n raise ValueError(\n \"A_init contains an out-of-range index.\")\n\n # Group:\n if group is None:\n g_index = list(range(p))\n else:\n group = np.array(group)\n if group.ndim > 1:\n raise ValueError(\"group should be a 1D array of integers.\")\n if group.size != p:\n raise 
ValueError(\n \"The length of group should be equal to X.shape[1].\")\n g_index = []\n group.sort()\n group_set = list(set(group))\n j = 0\n for i in group_set:\n while group[j] != i:\n j += 1\n g_index.append(j)\n\n # Weight:\n if weight is None:\n weight = np.ones(n)\n else:\n weight = np.array(weight)\n if weight.dtype not in (\"int\", \"float\"):\n raise ValueError(\"weight should be numeric.\")\n if weight.ndim > 1:\n raise ValueError(\"weight should be a 1-D array.\")\n if weight.size != n:\n raise ValueError(\"X.shape[0] should be equal to weight.size\")\n\n # Path parameters\n if path_type_int == 1: # seq\n if self.support_size is None:\n if (n == 1 or p == 1):\n support_sizes = [0, 1]\n else:\n support_sizes = list(range(0, max(min(p, int(\n n / (np.log(np.log(n)) * np.log(p)))), 1)))\n else:\n if isinstance(self.support_size,\n (numbers.Real, numbers.Integral)):\n support_sizes = np.empty(1, dtype=int)\n support_sizes[0] = self.support_size\n elif (np.any(np.array(self.support_size) > p) or\n np.any(np.array(self.support_size) < 0)):\n raise ValueError(\n \"All support_size should be between 0 and X.shape[1]\")\n else:\n support_sizes = self.support_size\n\n if self.alpha is None:\n alphas = [0]\n else:\n if isinstance(self.alpha, (numbers.Real, numbers.Integral)):\n alphas = np.empty(1, dtype=float)\n alphas[0] = self.alpha\n else:\n alphas = self.alpha\n\n # unused\n new_s_min = 0\n new_s_max = 0\n new_lambda_min = 0\n new_lambda_max = 0\n\n elif path_type_int == 2: # gs\n new_s_min = 0 \\\n if self.s_min is None else self.s_min\n new_s_max = min(p, int(n / (np.log(np.log(n)) * np.log(p)))) \\\n if self.s_max is None else self.s_max\n new_lambda_min = 0 # \\\n # if self.lambda_min is None else self.lambda_min\n new_lambda_max = 0 # \\\n # if self.lambda_max is None else self.lambda_max\n\n if new_s_max < new_s_min:\n raise ValueError(\"s_max should be larger than s_min\")\n # if new_lambda_max < new_lambda_min:\n # raise ValueError(\n # \"lambda_max should be larger than lambda_min.\")\n\n # unused\n support_sizes = [0]\n alphas = [0]\n support_sizes = np.array(support_sizes, dtype='int32')\n\n # Exchange_num\n if (not isinstance(self.exchange_num, int) or self.exchange_num <= 0):\n raise ValueError(\"exchange_num should be a positive integer.\")\n # elif (self.exchange_num > min(support_sizes)):\n # print(\"[Warning] exchange_num may be larger than sparsity, and it would be set up to sparsity.\")\n\n # screening\n if self.screening_size != -1:\n if self.screening_size == 0:\n self.screening_size = min(\n p, int(n / (np.log(np.log(n)) * np.log(p))))\n elif self.screening_size > p:\n raise ValueError(\n \"screening_size should be smaller than X.shape[1].\")\n elif self.screening_size < max(support_sizes):\n raise ValueError(\n \"screening_size should be more than max(support_size).\")\n\n # Primary fit parameters\n if (not isinstance(self.primary_model_fit_max_iter, int)\n or self.primary_model_fit_max_iter <= 0):\n raise ValueError(\n \"primary_model_fit_max_iter should be a positive integer.\")\n if self.primary_model_fit_epsilon < 0:\n raise ValueError(\n \"primary_model_fit_epsilon should be non-negative.\")\n\n # Thread\n if (not isinstance(self.thread, int) or self.thread < 0):\n raise ValueError(\n \"thread should be a positive number or 0 (use the maximum supported by the device).\")\n\n # Splicing type\n if self.splicing_type not in (0, 1):\n raise ValueError(\"splicing type should be 0 or 1.\")\n\n # Important_search\n if (not isinstance(self.important_search, int)\n or 
self.important_search < 0):\n raise ValueError(\n \"important_search should be a non-negative number.\")\n\n # Sparse X\n if self.sparse_matrix:\n if not isinstance(X, type(coo_matrix((1, 1)))):\n # print(\"sparse matrix 1\")\n nonzero = 0\n tmp = np.zeros([X.shape[0] * X.shape[1], 3])\n for j in range(X.shape[1]):\n for i in range(X.shape[0]):\n if X[i, j] != 0.:\n tmp[nonzero, :] = np.array([X[i, j], i, j])\n nonzero += 1\n X = tmp[:nonzero, :]\n else:\n # print(\"sparse matrix 2\")\n tmp = np.zeros([len(X.data), 3])\n tmp[:, 1] = X.row\n tmp[:, 2] = X.col\n tmp[:, 0] = X.data\n\n ind = np.lexsort((tmp[:, 2], tmp[:, 1]))\n X = tmp[ind, :]\n\n # normalize\n normalize = 0\n if is_normal:\n normalize = self.normalize_type\n\n # always_select\n if self.always_select is None:\n self.always_select = []\n\n # wrap with cpp\n # print(\"wrap enter.\")#///\n result = pywrap_GLM(X, y, weight,\n n, p, normalize,\n algorithm_type_int, model_type_int, self.max_iter, self.exchange_num,\n path_type_int, self.is_warm_start,\n ic_type_int, self.ic_coef, self.cv,\n g_index,\n support_sizes,\n alphas,\n cv_fold_id,\n new_s_min, new_s_max,\n new_lambda_min, new_lambda_max, self.n_lambda,\n self.screening_size,\n self.always_select,\n self.primary_model_fit_max_iter, self.primary_model_fit_epsilon,\n self.early_stop, self.approximate_Newton,\n self.thread,\n self.covariance_update,\n self.sparse_matrix,\n self.splicing_type,\n self.important_search,\n A_init,\n p * M, 1 * M,\n 1, 1\n )\n\n # print(\"linear fit end\")\n # print(len(result))\n # print(result)\n if M != 1:\n self.coef_ = result[0].reshape(p, M)\n else:\n self.coef_ = result[0]\n self.intercept_ = result[1]\n\n self.train_loss_ = result[2]\n self.ic_ = result[3]\n\n return self\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.log", "sklearn.utils.validation.check_is_fitted", "sklearn.utils.validation.check_array", "numpy.lexsort", "numpy.ones", "sklearn.utils.validation.check_X_y", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
poshan0126/TensorFlow-Machine-Learning-Projects
[ "5b8d7988b86e39ffdd8babec2b8e3c791b5e853e" ]
[ "Chapter09/utils.py" ]
[ "'''\nThis file consists of the helper functions for processing\n'''\n\n## Package Imports\nfrom PIL import Image\nimport os\nfrom parameters import *\nimport tensorflow as tf\nimport numpy as np\nimport glob\ntry:\n import wget\nexcept:\n print (\"Can't import wget as you are probably on windows laptop\")\n\ndef extract_files(data_dir,type = 'bags'):\n '''\n :param data_dir: Input directory\n :param type: bags or shoes\n :return: saves the cropped files to the bags to shoes directory\n '''\n input_file_dir = os.path.join(os.getcwd(),data_dir, \"train\")\n result_dir = os.path.join(os.getcwd(),type)\n if not os.path.exists(result_dir):\n os.makedirs(result_dir)\n\n file_names= os.listdir(input_file_dir)\n for file in file_names:\n input_image = Image.open(os.path.join(input_file_dir,file))\n input_image = input_image.resize([128, 64])\n input_image = input_image.crop([64, 0, 128, 64]) # Cropping only the colored image. Excluding the edge image\n input_image.save(os.path.join(result_dir,file))\n\n\ndef generate_dataset():\n '''\n Before executing this function. Follow these steps;\n 1. Download the datasets\n Handbags data Link 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2handbags.tar.gz'\n Shoes data Link 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2shoes.tar.gz'\n\n 2. Extract the tar files.\n\n 3. Execute this function. This function will extract the handbags and shoe images from the datasets.\n '''\n if not os.path.exists(os.path.join(os.getcwd(), \"edges2handbags\")):\n try:\n print (\"Downloading dataset\")\n bag_data_link = 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2handbags.tar.gz'\n shoe_data_link = 'https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/edges2shoes.tar.gz'\n \n wget.download(bag_data_link)\n wget.download(shoe_data_link)\n \n with tarfile.open('./edges2handbags.tar.gz') as tar:\n tar.extractall()\n tar.close()\n \n with tarfile.open('./edges2shoes.tar.gz') as tar:\n tar.extractall()\n tar.close()\n except:\n print (\"It seems you are on windows laptop. 
Please download the data as instructed in README before executing the code\")\n\n extract_files(\"edges2handbags\", 'bags')\n extract_files(\"edges2shoes\", 'shoes')\n\n\ndef load_data(load_type = 'train'):\n shoelist = glob.glob(os.path.join(os.getcwd(), \"shoes/*jpg\"))\n shoe_data = np.array([np.array(Image.open(fname)) for fname in shoelist]).astype(np.float32)\n baglist = glob.glob(os.path.join(os.getcwd(), \"bags/*jpg\"))\n bags_data = np.array([np.array(Image.open(fname)) for fname in baglist]).astype(np.float32)\n shoe_data = shoe_data/255.\n bags_data = bags_data/255.\n return shoe_data, bags_data\n\n\ndef save_image(global_step, img_data, file_name):\n sample_results_dir = os.path.join(os.getcwd(), \"sample_results\", \"epoch_\" +str(global_step))\n if not os.path.exists(sample_results_dir):\n os.makedirs(sample_results_dir)\n\n\n result = Image.fromarray((img_data[0] * 255).astype(np.uint8))\n result.save(os.path.join(sample_results_dir, file_name + \".jpg\"))\n\n\n\ndef discriminator(x,initializer, scope_name ='discriminator', reuse=False):\n with tf.variable_scope(scope_name) as scope:\n if reuse:\n scope.reuse_variables()\n conv1 = tf.contrib.layers.conv2d(inputs=x, num_outputs=32, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, weights_initializer=initializer,\n scope=\"disc_conv1\") # 32 x 32 x 32\n conv2 = tf.contrib.layers.conv2d(inputs=conv1, num_outputs=64, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_conv2\") # 16 x 16 x 64\n conv3 = tf.contrib.layers.conv2d(inputs=conv2, num_outputs=128, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_conv3\") # 8 x 8 x 128\n conv4 = tf.contrib.layers.conv2d(inputs=conv3, num_outputs=256, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_conv4\") # 4 x 4 x 256\n conv5 = tf.contrib.layers.conv2d(inputs=conv4, num_outputs=512, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_conv5\") # 2 x 2 x 512\n fc1 = tf.reshape(conv5, shape=[tf.shape(x)[0], 2 * 2 * 512])\n fc1 = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=512, reuse=reuse, activation_fn=tf.nn.leaky_relu,\n normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_fc1\")\n fc2 = tf.contrib.layers.fully_connected(inputs=fc1, num_outputs=1, reuse=reuse, activation_fn=tf.nn.sigmoid,\n weights_initializer=initializer, scope=\"disc_fc2\")\n\n return fc2\n\n\ndef generator(x, initializer, scope_name = 'generator',reuse=False):\n with tf.variable_scope(scope_name) as scope:\n if reuse:\n scope.reuse_variables()\n conv1 = tf.contrib.layers.conv2d(inputs=x, num_outputs=32, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, weights_initializer=initializer,\n scope=\"disc_conv1\") # 32 x 32 x 32\n conv2 = tf.contrib.layers.conv2d(inputs=conv1, num_outputs=64, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, 
scope=\"disc_conv2\") # 16 x 16 x 64\n conv3 = tf.contrib.layers.conv2d(inputs=conv2, num_outputs=128, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_conv3\") # 8 x 8 x 128\n conv4 = tf.contrib.layers.conv2d(inputs=conv3, num_outputs=256, kernel_size=4, stride=2, padding=\"SAME\",\n reuse=reuse, activation_fn=tf.nn.leaky_relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"disc_conv4\") # 4 x 4 x 256\n\n deconv1 = tf.contrib.layers.conv2d(conv4, num_outputs=4 * 128, kernel_size=4, stride=1, padding=\"SAME\",\n activation_fn=tf.nn.relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"gen_conv1\")\n deconv1 = tf.reshape(deconv1, shape=[tf.shape(x)[0], 8, 8, 128])\n\n deconv2 = tf.contrib.layers.conv2d(deconv1, num_outputs=4 * 64, kernel_size=4, stride=1, padding=\"SAME\",\n activation_fn=tf.nn.relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"gen_conv2\")\n deconv2 = tf.reshape(deconv2, shape=[tf.shape(x)[0], 16, 16, 64])\n\n deconv3 = tf.contrib.layers.conv2d(deconv2, num_outputs=4 * 32, kernel_size=4, stride=1, padding=\"SAME\",\n activation_fn=tf.nn.relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"gen_conv3\")\n deconv3 = tf.reshape(deconv3, shape=[tf.shape(x)[0], 32, 32, 32])\n\n deconv4 = tf.contrib.layers.conv2d(deconv3, num_outputs=4 * 16, kernel_size=4, stride=1, padding=\"SAME\",\n activation_fn=tf.nn.relu, normalizer_fn=tf.contrib.layers.batch_norm,\n weights_initializer=initializer, scope=\"gen_conv4\")\n deconv4 = tf.reshape(deconv4, shape=[tf.shape(x)[0], 64, 64, 16])\n\n recon = tf.contrib.layers.conv2d(deconv4, num_outputs=3, kernel_size=4, stride=1, padding=\"SAME\", \\\n activation_fn=tf.nn.relu, scope=\"gen_conv5\")\n\n return recon\n " ]
[ [ "tensorflow.variable_scope", "tensorflow.contrib.layers.conv2d", "tensorflow.shape", "tensorflow.contrib.layers.fully_connected" ] ]
JakobStruye/maml_rl
[ "ec92a5138127a86a4c15925c70e61dbdf038cd18" ]
[ "sandbox/rocky/tf/algos/batch_polopt.py" ]
[ "import time\nfrom rllab.algos.base import RLAlgorithm\nimport rllab.misc.logger as logger\nimport rllab.plotter as plotter\nfrom sandbox.rocky.tf.policies.base import Policy\nimport tensorflow as tf\nfrom sandbox.rocky.tf.samplers.batch_sampler import BatchSampler\nfrom sandbox.rocky.tf.samplers.vectorized_sampler import VectorizedSampler\n\n\nclass BatchPolopt(RLAlgorithm):\n \"\"\"\n Base class for batch sampling-based policy optimization methods.\n This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.\n \"\"\"\n\n def __init__(\n self,\n env,\n policy,\n baseline,\n scope=None,\n n_itr=500,\n start_itr=0,\n batch_size=5000,\n max_path_length=500,\n discount=0.99,\n gae_lambda=1,\n plot=False,\n pause_for_plot=False,\n center_adv=True,\n positive_adv=False,\n store_paths=False,\n whole_paths=True,\n fixed_horizon=False,\n sampler_cls=None,\n sampler_args=None,\n force_batch_sampler=False,\n load_policy=None,\n reset_arg=None,\n **kwargs\n ):\n \"\"\"\n :param env: Environment\n :param policy: Policy\n :type policy: Policy\n :param baseline: Baseline\n :param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms\n simultaneously, each using different environments and policies\n :param n_itr: Number of iterations.\n :param start_itr: Starting iteration.\n :param batch_size: Number of samples per iteration.\n :param max_path_length: Maximum length of a single rollout.\n :param discount: Discount.\n :param gae_lambda: Lambda used for generalized advantage estimation.\n :param plot: Plot evaluation run after each iteration.\n :param pause_for_plot: Whether to pause before contiuing when plotting.\n :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.\n :param positive_adv: Whether to shift the advantages so that they are always positive. 
When used in\n conjunction with center_adv the advantages will be standardized before shifting.\n :param store_paths: Whether to save all paths data to the snapshot.\n :return:\n \"\"\"\n self.env = env\n self.policy = policy\n self.load_policy=load_policy\n self.baseline = baseline\n self.scope = scope\n self.n_itr = n_itr\n self.start_itr = start_itr\n self.batch_size = batch_size\n self.max_path_length = max_path_length\n self.discount = discount\n self.gae_lambda = gae_lambda\n self.plot = plot\n self.pause_for_plot = pause_for_plot\n self.center_adv = center_adv\n self.positive_adv = positive_adv\n self.store_paths = store_paths\n self.whole_paths = whole_paths\n self.fixed_horizon = fixed_horizon\n if sampler_cls is None:\n #if self.policy.vectorized and not force_batch_sampler:\n #sampler_cls = VectorizedSampler\n #else:\n sampler_cls = BatchSampler\n if sampler_args is None:\n sampler_args = dict()\n self.sampler = sampler_cls(self, **sampler_args)\n self.reset_arg = reset_arg\n\n def start_worker(self):\n self.sampler.start_worker()\n if self.plot:\n plotter.init_plot(self.env, self.policy)\n\n def shutdown_worker(self):\n self.sampler.shutdown_worker()\n\n def obtain_samples(self, itr):\n return self.sampler.obtain_samples(itr, reset_args=self.reset_arg)\n\n def process_samples(self, itr, paths):\n return self.sampler.process_samples(itr, paths)\n\n def train(self):\n with tf.Session() as sess:\n if self.load_policy is not None:# and False:\n import joblib\n self.policy = joblib.load(self.load_policy)['policy']\n self.init_opt()\n # initialize uninitialized vars (I know, it's ugly)\n uninit_vars = []\n for var in tf.all_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError:\n uninit_vars.append(var)\n sess.run(tf.initialize_variables(uninit_vars))\n #sess.run(tf.initialize_all_variables())\n self.start_worker()\n start_time = time.time()\n for itr in range(self.start_itr, self.n_itr):\n itr_start_time = time.time()\n with logger.prefix('itr #%d | ' % itr):\n\n logger.log(\"Obtaining samples...\")\n paths = self.obtain_samples(itr)\n logger.log(\"Processing samples...\")\n samples_data = self.process_samples(itr, paths)\n logger.log(\"Logging diagnostics...\")\n self.log_diagnostics(paths)\n logger.log(\"Optimizing policy...\")\n self.optimize_policy(itr, samples_data)\n #new_param_values = self.policy.get_variable_values(self.policy.all_params)\n\n logger.log(\"Saving snapshot...\")\n params = self.get_itr_snapshot(itr, samples_data) # , **kwargs)\n if self.store_paths:\n params[\"paths\"] = samples_data[\"paths\"]\n logger.save_itr_params(itr, params)\n logger.log(\"Saved\")\n logger.record_tabular('Time', time.time() - start_time)\n logger.record_tabular('ItrTime', time.time() - itr_start_time)\n\n #import pickle\n #with open('paths_itr'+str(itr)+'.pkl', 'wb') as f:\n # pickle.dump(paths, f)\n\n # debugging\n \"\"\"\n if itr % 1 == 0:\n logger.log(\"Saving visualization of paths\")\n import matplotlib.pyplot as plt;\n for ind in range(5):\n plt.clf(); plt.hold(True)\n points = paths[ind]['observations']\n plt.plot(points[:,0], points[:,1], '-r', linewidth=2)\n plt.xlim([-1.0, 1.0])\n plt.ylim([-1.0, 1.0])\n plt.legend(['path'])\n plt.savefig('/home/cfinn/path'+str(ind)+'.png')\n \"\"\"\n # end debugging\n\n logger.dump_tabular(with_prefix=False)\n if self.plot:\n self.update_plot()\n if self.pause_for_plot:\n input(\"Plotting evaluation run: Press Enter to \"\n \"continue...\")\n self.shutdown_worker()\n\n def log_diagnostics(self, paths):\n 
self.env.log_diagnostics(paths)\n self.policy.log_diagnostics(paths)\n self.baseline.log_diagnostics(paths)\n\n def init_opt(self):\n \"\"\"\n Initialize the optimization procedure. If using tensorflow, this may\n include declaring all the variables and compiling functions\n \"\"\"\n raise NotImplementedError\n\n def get_itr_snapshot(self, itr, samples_data):\n \"\"\"\n Returns all the data that should be saved in the snapshot for this\n iteration.\n \"\"\"\n raise NotImplementedError\n\n def optimize_policy(self, itr, samples_data):\n raise NotImplementedError\n\n def update_plot(self):\n if self.plot:\n plotter.update_plot(self.policy, self.max_path_length)\n" ]
[ [ "tensorflow.all_variables", "tensorflow.initialize_variables", "tensorflow.Session" ] ]
ITAINNOVA/SAME
[ "d46dda98753fcb3606e04c3db2d20c9e700140e8" ]
[ "semantic_aware_models/models/classification/brnn_classifier.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.parallel\nfrom semantic_aware_models.utils.gpu import GPU\n\ngpu = GPU()\n\n\nclass BiRNN(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(BiRNN, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True).to(gpu.get_default_device())\n self.fc = nn.Linear(hidden_size * 2, num_classes)\n\n def forward(self, x):\n # Initialize parameters\n h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size)\n h0 = gpu.to_device(h0, gpu.get_default_device())\n c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size)\n c0 = gpu.to_device(c0, gpu.get_default_device())\n\n # Forward propagate LSTM\n out, hidden = self.lstm(x, (h0, c0))\n out = gpu.to_device(out, gpu.get_default_device())\n hidden = gpu.to_device(hidden, gpu.get_default_device())\n\n # Decode the hidden state of the last time step\n out = self.fc(out)\n return out\n\n\nclass UnstructuredModel(nn.Module):\n def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, batch_size):\n super(UnstructuredModel, self).__init__()\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.batch_size = batch_size\n\n self.word_embeddings = nn.Embedding(vocab_size + 1, embedding_dim).to(gpu.get_default_device())\n\n biRNN = BiRNN(input_size=embedding_dim, hidden_size=hidden_dim, num_layers=1, num_classes=tagset_size)\n\n self.brnn = biRNN.to(gpu.get_default_device())\n self.linear_1 = nn.Linear(int(embedding_dim), int(embedding_dim / 2))\n self.linear_2 = nn.Linear(int(embedding_dim / 2), int(embedding_dim))\n\n def forward(self, input):\n embeds = self.word_embeddings(input)\n\n brnn_out = self.brnn(embeds.view(-1, self.batch_size, self.embedding_dim))\n brnn_out = brnn_out.view(self.batch_size, -1, self.embedding_dim)\n aux = brnn_out.mean(-2)\n\n tag_space = torch.tanh(self.linear_1(aux))\n tag_scores = torch.tanh(self.linear_2(tag_space))\n return tag_scores\n\n\nclass StructuredModel(nn.Module):\n def __init__(self, embedding_dim, vocab_size):\n super(StructuredModel, self).__init__()\n self.word_embeddings = nn.Embedding(vocab_size + 1, embedding_dim).to(gpu.get_default_device())\n self.linear_1 = nn.Linear(int(embedding_dim), int(embedding_dim / 2))\n self.linear_2 = nn.Linear(int(embedding_dim / 2), int(embedding_dim))\n\n def forward(self, input):\n embeds = self.word_embeddings(input)\n aux = embeds.mean(-2)\n tag_space = torch.tanh(self.linear_1(aux))\n tag_scores = torch.tanh(self.linear_2(tag_space))\n return tag_scores\n\n\nclass DeepCBRS(nn.Module):\n \"\"\"\n This implementation is loosely based on their LUA's homonym published at https://github.com/nlp-deepcbrs/amar\n \"\"\"\n def __init__(self, items_data, genres_data, authors_data,\n directors_data, wiki_categories_data, batch_size): # ratings_data, initial_weights, batch_size\n super(DeepCBRS, self).__init__()\n\n self.item_embeddings_size = 50\n self.genre_embeddings_size = 50\n self.author_embeddings_size = 50\n self.director_embeddings_size = 50\n self.property_embeddings_size = 50\n self.wiki_category_embeddings_size = 50\n self.half_embeddings_size = self.item_embeddings_size / 2\n self.hidden_dense_layer_size = self.item_embeddings_size\n self.batch_size = batch_size\n\n self.num_tokens = len(items_data['token2id'])\n self.genres_vocab_size = len(genres_data['genre2id'])\n self.authors_vocab_size = 
len(authors_data['author2id'])\n self.directors_vocab_size = len(directors_data['director2id'])\n self.wiki_categorires_vocab_size = len(wiki_categories_data['wiki_category2id'])\n\n # Networks building\n self.items_net = UnstructuredModel(embedding_dim=self.item_embeddings_size,\n hidden_dim=self.hidden_dense_layer_size,\n vocab_size=self.num_tokens, tagset_size=self.item_embeddings_size,\n batch_size=self.batch_size)\n if genres_data:\n self.hidden_dense_layer_size = self.hidden_dense_layer_size + self.genre_embeddings_size\n self.genres_net = StructuredModel(embedding_dim=self.genre_embeddings_size,\n vocab_size=self.genres_vocab_size)\n\n if authors_data:\n self.hidden_dense_layer_size = self.hidden_dense_layer_size + self.author_embeddings_size\n self.authors_net = StructuredModel(embedding_dim=self.author_embeddings_size,\n vocab_size=self.authors_vocab_size)\n\n if directors_data:\n self.hidden_dense_layer_size = self.hidden_dense_layer_size + self.director_embeddings_size\n self.directors_net = StructuredModel(embedding_dim=self.director_embeddings_size,\n vocab_size=self.directors_vocab_size)\n\n if wiki_categories_data:\n self.hidden_dense_layer_size = self.hidden_dense_layer_size + self.wiki_category_embeddings_size\n self.wiki_categories_net = StructuredModel(embedding_dim=self.wiki_category_embeddings_size,\n vocab_size=self.wiki_categorires_vocab_size)\n\n self.half_embeddings_size = self.hidden_dense_layer_size / 2\n self.linear_1 = nn.Linear(self.hidden_dense_layer_size, 5)\n\n def forward(self, item_input, genres_input, authors_input, directors_input, wiki_categories_input):\n item_output = self.items_net(item_input)\n genres_output = self.genres_net(genres_input)\n authors_output = self.authors_net(authors_input)\n directors_output = self.directors_net(directors_input)\n wiki_categories_output = self.wiki_categories_net(wiki_categories_input)\n\n join_table = torch.cat((item_output, genres_output, authors_output, directors_output, wiki_categories_output), -1)\n output = torch.sigmoid(self.linear_1(join_table))\n\n return output\n" ]
[ [ "torch.nn.Linear", "torch.nn.LSTM", "torch.nn.Embedding", "torch.cat" ] ]
Debapriya-Tula/Face-Verification
[ "939bc1adc171f67be375d4b98b0ef3520a82afbf" ]
[ "detect_and_verify.py" ]
[ "import os\nfrom os.path import join as j\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef detect_face(img):\n # The file containing the pretrained classifier \n haar_file = 'haarcascade_frontalface_default.xml'\n \n # Image to be resized to this shape\n (width, height) = (224, 224) \n \n # Make the cascade classifier object\n face_cascade = cv2.CascadeClassifier(haar_file)\n \n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n\n # Detect the face\n faces = face_cascade.detectMultiScale(gray, 1.3, 4) \n\n face_resize = None\n for (x, y, w, h) in faces:\n # The classifier seemed to scrap the chin and hair. Adjustments made to accomodate those.\n face = img[y-60:y + h + 60, x-20:x + w+20] \n face_resize = cv2.resize(face, (width, height))\n \n return face_resize\n\n\ndef mean_squared_error(y_true, y_pred):\n return K.mean(K.square(y_true - y_pred))\n\n\ndef verify_face(img, username):\n dir_ = 'dataset'\n NUM_CLASSES = len(os.listdir(dir_))\n \n # The directory containing the user's photos\n pos_dir = username\n P = os.listdir(os.path.join(dir_, pos_dir))[-1]\n\n neg_dirs = [] #os.listdir(dir_)[np.random.randint(NUM_CLASSES)]\n\n for i in range(3):\n neg_dir = os.listdir(dir_)[np.random.randint(NUM_CLASSES)]\n while neg_dir == pos_dir or neg_dir in neg_dirs:\n neg_dir = os.listdir(dir_)[np.random.randint(NUM_CLASSES)]\n neg_dirs.append(neg_dir)\n \n\n P = plt.imread(j(dir_, pos_dir, os.listdir(j(dir_, pos_dir)))[-1])\n N1 = plt.imread(dir_ +'/'+ neg_dirs[0] +'/'+ os.listdir(dir_+'/'+neg_dirs[0])[-1])\n N2 = plt.imread(dir_ +'/'+ neg_dirs[1] +'/'+ os.listdir(dir_+'/'+neg_dirs[1])[-1])\n N3 = plt.imread(dir_ +'/'+ neg_dirs[2] +'/'+ os.listdir(dir_+'/'+neg_dirs[2])[-1])\n\n P = cv2.resize(P, (224,224))\n N1 = cv2.resize(N1, (224,224))\n N2 = cv2.resize(N2, (224,224))\n N3 = cv2.resize(N3, (224,224))\n \n A = np.reshape(img, (1,224,224,3))\n P = np.reshape(P, (1,224,224,3))\n N1, N2, N3 = [np.reshape(N, (1,224,224,3)) for N in [N1, N2, N3]]\n\n\n req_model = load_req_model('<path to model.h5>')\n\n enc_anc = req_model.predict(A)\n enc_pos = req_model.predict(P)\n enc_neg_1 = req_model.predict(N1)\n enc_neg_2 = req_model.predict(N2)\n enc_neg_3 = req_model.predict(N3)\n\n \n # Normalizing the encodings to avoid large values\n maxm = np.max(enc_anc)\n enc_anc = enc_anc/maxm\n enc_pos = enc_pos/maxm\n enc_neg_1, enc_neg_2, enc_neg_3 = [enc/maxm for enc in [enc_neg_1, enc_neg_2, enc_neg_3]]\n\n positive_loss = mean_squared_error(enc_anc, enc_pos).numpy()\n negative_losses = [mean_squared_error(enc_anc, enc_neg).numpy() \n for enc_neg in [enc_neg_1, enc_neg_2, enc_neg_3]]\n\n # flag becomes false if the match is unsuccessful\n flag = True\n for neg_loss in negative_losses:\n if positive_loss > neg_loss:\n flag = False\n \n return flag\n\n\n\n# Here it is assumed that some app has provided with a webcam click called 'img' at the time of login.\nface_detected = detect_face(img) \n# The username is provided at the time of login.\nflag = verify_face(face_detected, username)\n" ]
[ [ "numpy.reshape", "numpy.max", "numpy.random.randint" ] ]
s4ayub/torchrec
[ "eaa0915c9c1563d47df3a4a075c2e51b3b7ca27f" ]
[ "torchrec/distributed/tests/test_model.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom dataclasses import dataclass\nfrom typing import List, cast, Optional, Tuple, Any, Dict, Union\n\nimport torch\nimport torch.nn as nn\nfrom torchrec.distributed.embedding_types import EmbeddingTableConfig\nfrom torchrec.distributed.embeddingbag import (\n EmbeddingBagSharder,\n EmbeddingBagCollectionSharder,\n)\nfrom torchrec.modules.embedding_configs import EmbeddingBagConfig, BaseEmbeddingConfig\nfrom torchrec.modules.embedding_modules import EmbeddingBagCollection\nfrom torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor\nfrom torchrec.types import Pipelineable\n\n\n@dataclass\nclass ModelInput(Pipelineable):\n float_features: torch.Tensor\n idlist_features: KeyedJaggedTensor\n idscore_features: KeyedJaggedTensor\n label: torch.Tensor\n\n @staticmethod\n def generate(\n batch_size: int,\n world_size: int,\n num_float_features: int,\n tables: Union[List[EmbeddingTableConfig], List[EmbeddingBagConfig]],\n weighted_tables: Union[List[EmbeddingTableConfig], List[EmbeddingBagConfig]],\n pooling_avg: int = 10,\n ) -> Tuple[\"ModelInput\", List[\"ModelInput\"]]:\n \"\"\"\n Returns a global (single-rank training) batch\n and a list of local (multi-rank training) batches of world_size.\n \"\"\"\n idlist_features = [\n feature for table in tables for feature in table.feature_names\n ]\n idscore_features = [\n feature for table in weighted_tables for feature in table.feature_names\n ]\n\n idlist_ind_ranges = [table.num_embeddings for table in tables]\n idscore_ind_ranges = [table.num_embeddings for table in weighted_tables]\n\n # Generate global batch.\n global_idlist_lengths = []\n global_idlist_indices = []\n global_idscore_lengths = []\n global_idscore_indices = []\n global_idscore_weights = []\n\n for ind_range in idlist_ind_ranges:\n lengths = torch.abs(\n torch.randn(batch_size * world_size) + pooling_avg\n ).int()\n num_indices = cast(int, torch.sum(lengths).item())\n indices = torch.randint(0, ind_range, (num_indices,))\n global_idlist_lengths.append(lengths)\n global_idlist_indices.append(indices)\n global_idlist_kjt = KeyedJaggedTensor(\n keys=idlist_features,\n values=torch.cat(global_idlist_indices),\n lengths=torch.cat(global_idlist_lengths),\n )\n\n for ind_range in idscore_ind_ranges:\n lengths = torch.abs(\n torch.randn(batch_size * world_size) + pooling_avg\n ).int()\n num_indices = cast(int, torch.sum(lengths).item())\n indices = torch.randint(0, ind_range, (num_indices,))\n weights = torch.rand((num_indices,))\n global_idscore_lengths.append(lengths)\n global_idscore_indices.append(indices)\n global_idscore_weights.append(weights)\n global_idscore_kjt = (\n KeyedJaggedTensor(\n keys=idscore_features,\n values=torch.cat(global_idscore_indices),\n lengths=torch.cat(global_idscore_lengths),\n weights=torch.cat(global_idscore_weights),\n )\n if global_idscore_indices\n else None\n )\n\n global_float = torch.rand((batch_size * world_size, num_float_features))\n global_label = torch.rand(batch_size * world_size)\n\n # Split global batch into local batches.\n local_inputs = []\n for r in range(world_size):\n local_idlist_lengths = []\n local_idlist_indices = []\n local_idscore_lengths = []\n local_idscore_indices = []\n local_idscore_weights = []\n\n for lengths, indices in zip(global_idlist_lengths, global_idlist_indices):\n 
local_idlist_lengths.append(\n lengths[r * batch_size : (r + 1) * batch_size]\n )\n lengths_cumsum = [0] + lengths.view(world_size, -1).sum(dim=1).cumsum(\n dim=0\n ).tolist()\n local_idlist_indices.append(\n indices[lengths_cumsum[r] : lengths_cumsum[r + 1]]\n )\n\n for lengths, indices, weights in zip(\n global_idscore_lengths, global_idscore_indices, global_idscore_weights\n ):\n local_idscore_lengths.append(\n lengths[r * batch_size : (r + 1) * batch_size]\n )\n lengths_cumsum = [0] + lengths.view(world_size, -1).sum(dim=1).cumsum(\n dim=0\n ).tolist()\n local_idscore_indices.append(\n indices[lengths_cumsum[r] : lengths_cumsum[r + 1]]\n )\n local_idscore_weights.append(\n weights[lengths_cumsum[r] : lengths_cumsum[r + 1]]\n )\n\n local_idlist_kjt = KeyedJaggedTensor(\n keys=idlist_features,\n values=torch.cat(local_idlist_indices),\n lengths=torch.cat(local_idlist_lengths),\n )\n\n local_idscore_kjt = (\n KeyedJaggedTensor(\n keys=idscore_features,\n values=torch.cat(local_idscore_indices),\n lengths=torch.cat(local_idscore_lengths),\n weights=torch.cat(local_idscore_weights),\n )\n if local_idscore_indices\n else None\n )\n\n local_input = ModelInput(\n float_features=global_float[r * batch_size : (r + 1) * batch_size],\n idlist_features=local_idlist_kjt,\n idscore_features=local_idscore_kjt,\n label=global_label[r * batch_size : (r + 1) * batch_size],\n )\n local_inputs.append(local_input)\n\n return (\n ModelInput(\n float_features=global_float,\n idlist_features=global_idlist_kjt,\n idscore_features=global_idscore_kjt,\n label=global_label,\n ),\n local_inputs,\n )\n\n def to(self, device: torch.device, non_blocking: bool = False) -> \"ModelInput\":\n return ModelInput(\n float_features=self.float_features.to(\n device=device, non_blocking=non_blocking\n ),\n idlist_features=self.idlist_features.to(\n device=device, non_blocking=non_blocking\n ),\n # pyre-ignore [6]\n idscore_features=self.idscore_features.to(\n device=device, non_blocking=non_blocking\n )\n if self.idscore_features is not None\n else None,\n label=self.label.to(device=device, non_blocking=non_blocking),\n )\n\n def record_stream(self, stream: torch.cuda.streams.Stream) -> None:\n self.float_features.record_stream(stream)\n self.idlist_features.record_stream(stream)\n if self.idscore_features is not None:\n self.idscore_features.record_stream(stream)\n self.label.record_stream(stream)\n\n\nclass TestDenseArch(nn.Module):\n \"\"\"\n Basic nn.Module for testing\n\n Constructor Args:\n device\n\n Call Args:\n dense_input: torch.Tensor\n\n Returns:\n KeyedTensor\n\n Example:\n >>> TestDenseArch()\n \"\"\"\n\n def __init__(\n self,\n num_float_features: int = 10,\n device: Optional[torch.device] = None,\n ) -> None:\n super().__init__()\n if device is None:\n device = torch.device(\"cpu\")\n self.linear: nn.modules.Linear = nn.Linear(\n in_features=num_float_features, out_features=8, device=device\n )\n\n def forward(self, dense_input: torch.Tensor) -> torch.Tensor:\n return self.linear(dense_input)\n\n\nclass TestOverArch(nn.Module):\n \"\"\"\n Basic nn.Module for testing\n\n Constructor Args:\n device\n\n Call Args:\n dense: torch.Tensor,\n sparse: KeyedTensor,\n\n Returns:\n torch.Tensor\n\n Example:\n >>> TestOverArch()\n \"\"\"\n\n def __init__(\n self,\n tables: List[EmbeddingBagConfig],\n weighted_tables: List[EmbeddingBagConfig],\n device: Optional[torch.device] = None,\n ) -> None:\n super().__init__()\n if device is None:\n device = torch.device(\"cpu\")\n self._features: List[str] = [\n feature for table in 
tables for feature in table.feature_names\n ]\n self._weighted_features: List[str] = [\n feature for table in weighted_tables for feature in table.feature_names\n ]\n in_features = (\n 8\n + sum([table.embedding_dim * len(table.feature_names) for table in tables])\n + sum(\n [\n table.embedding_dim * len(table.feature_names)\n for table in weighted_tables\n ]\n )\n )\n self.linear: nn.modules.Linear = nn.Linear(\n in_features=in_features, out_features=16, device=device\n )\n\n def forward(\n self,\n dense: torch.Tensor,\n sparse: KeyedTensor,\n ) -> torch.Tensor:\n ret_list = []\n ret_list.append(dense)\n for feature_name in self._features:\n ret_list.append(sparse[feature_name])\n for feature_name in self._weighted_features:\n ret_list.append(sparse[feature_name])\n return self.linear(torch.cat(ret_list, dim=1))\n\n\nclass TestSparseArch(nn.Module):\n \"\"\"\n Basic nn.Module for testing\n\n Constructor Args:\n tables\n device\n\n Call Args:\n features\n\n Returns:\n KeyedTensor\n \"\"\"\n\n def __init__(\n self,\n tables: List[EmbeddingBagConfig],\n weighted_tables: List[EmbeddingBagConfig],\n device: Optional[torch.device] = None,\n ) -> None:\n super().__init__()\n if device is None:\n device = torch.device(\"cpu\")\n self.ebc: EmbeddingBagCollection = EmbeddingBagCollection(\n tables=tables,\n device=device,\n )\n self.weighted_ebc: EmbeddingBagCollection = EmbeddingBagCollection(\n tables=weighted_tables,\n is_weighted=True,\n device=device,\n )\n\n def forward(\n self, features: KeyedJaggedTensor, weighted_features: KeyedJaggedTensor\n ) -> KeyedTensor:\n ebc = self.ebc(features)\n w_ebc = self.weighted_ebc(weighted_features)\n return KeyedTensor(\n keys=ebc.keys() + w_ebc.keys(),\n length_per_key=ebc.length_per_key() + w_ebc.length_per_key(),\n values=torch.cat([ebc.values(), w_ebc.values()], dim=1),\n )\n\n\nclass TestSparseNNBase(nn.Module):\n \"\"\"\n Base class for a SparseNN model.\n\n Constructor Args:\n tables: List[BaseEmbeddingConfig],\n weighted_tables: Optional[List[BaseEmbeddingConfig]],\n embedding_groups: Optional[Dict[str, List[str]]],\n dense_device: Optional[torch.device],\n sparse_device: Optional[torch.device],\n \"\"\"\n\n def __init__(\n self,\n tables: List[BaseEmbeddingConfig],\n weighted_tables: Optional[List[BaseEmbeddingConfig]] = None,\n embedding_groups: Optional[Dict[str, List[str]]] = None,\n dense_device: Optional[torch.device] = None,\n sparse_device: Optional[torch.device] = None,\n ) -> None:\n super().__init__()\n if dense_device is None:\n dense_device = torch.device(\"cpu\")\n if sparse_device is None:\n sparse_device = torch.device(\"cpu\")\n\n\nclass TestSparseNN(TestSparseNNBase):\n \"\"\"\n Simple version of a SparseNN model.\n\n Constructor Args:\n tables: List[EmbeddingBagConfig],\n weighted_tables: Optional[List[EmbeddingBagConfig]],\n embedding_groups: Optional[Dict[str, List[str]]],\n dense_device: Optional[torch.device],\n sparse_device: Optional[torch.device],\n\n Call Args:\n input: ModelInput,\n\n Returns:\n torch.Tensor\n\n Example:\n >>> TestSparseNN()\n \"\"\"\n\n def __init__(\n self,\n tables: List[EmbeddingBagConfig],\n num_float_features: int = 10,\n weighted_tables: Optional[List[EmbeddingBagConfig]] = None,\n embedding_groups: Optional[Dict[str, List[str]]] = None,\n dense_device: Optional[torch.device] = None,\n sparse_device: Optional[torch.device] = None,\n ) -> None:\n super().__init__(\n tables=cast(List[BaseEmbeddingConfig], tables),\n weighted_tables=cast(Optional[List[BaseEmbeddingConfig]], weighted_tables),\n 
embedding_groups=embedding_groups,\n dense_device=dense_device,\n sparse_device=sparse_device,\n )\n if weighted_tables is None:\n weighted_tables = []\n\n self.dense = TestDenseArch(num_float_features, dense_device)\n self.sparse = TestSparseArch(tables, weighted_tables, sparse_device)\n self.over = TestOverArch(tables, weighted_tables, dense_device)\n\n def forward(\n self,\n input: ModelInput,\n ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n dense_r = self.dense(input.float_features)\n sparse_r = self.sparse(input.idlist_features, input.idscore_features)\n over_r = self.over(dense_r, sparse_r)\n pred = torch.sigmoid(torch.mean(over_r, dim=1))\n if self.training:\n return (\n torch.nn.functional.binary_cross_entropy_with_logits(pred, input.label),\n pred,\n )\n else:\n return pred\n\n\nclass TestEBCSharder(EmbeddingBagCollectionSharder[EmbeddingBagCollection]):\n def __init__(\n self, sharding_type: str, kernel_type: str, fused_params: Dict[str, Any] = {}\n ) -> None:\n self._sharding_type = sharding_type\n self._kernel_type = kernel_type\n self._fused_params = fused_params\n\n \"\"\"\n Restricts sharding to single type only.\n \"\"\"\n\n def sharding_types(self, compute_device_type: str) -> List[str]:\n return [self._sharding_type]\n\n \"\"\"\n Restricts to single impl.\n \"\"\"\n\n def compute_kernels(\n self, sharding_type: str, compute_device_type: str\n ) -> List[str]:\n return [self._kernel_type]\n\n @property\n def fused_params(self) -> Optional[Dict[str, Any]]:\n return self._fused_params\n\n\nclass TestEBSharder(EmbeddingBagSharder[nn.EmbeddingBag]):\n def __init__(\n self, sharding_type: str, kernel_type: str, fused_params: Dict[str, Any]\n ) -> None:\n self._sharding_type = sharding_type\n self._kernel_type = kernel_type\n self._fused_params = fused_params\n\n \"\"\"\n Restricts sharding to single type only.\n \"\"\"\n\n def sharding_types(self, compute_device_type: str) -> List[str]:\n return [self._sharding_type]\n\n \"\"\"\n Restricts to single impl.\n \"\"\"\n\n def compute_kernels(\n self, sharding_type: str, compute_device_type: str\n ) -> List[str]:\n return [self._kernel_type]\n\n @property\n def fused_params(self) -> Optional[Dict[str, Any]]:\n return self._fused_params\n" ]
[ [ "torch.mean", "torch.randint", "torch.cat", "torch.randn", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.sum", "torch.nn.Linear", "torch.rand", "torch.device" ] ]
shofer16450/MetPy
[ "82fcb11ead3f777047d2d2a25decb11949b81b4b", "82fcb11ead3f777047d2d2a25decb11949b81b4b" ]
[ "metpy/io/tests/test_nexrad.py", "examples/gridding/Wind_SLP_Interpolation.py" ]
[ "# Copyright (c) 2015,2016,2017 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Test the `nexrad` module.\"\"\"\n\nfrom datetime import datetime\nimport glob\nfrom io import BytesIO\nimport logging\nimport os.path\n\nimport numpy as np\nimport pytest\n\nfrom metpy.cbook import get_test_data\nfrom metpy.io import is_precip_mode, Level2File, Level3File\n\n# Turn off the warnings for tests\nlogging.getLogger('metpy.io.nexrad').setLevel(logging.CRITICAL)\n\n#\n# NEXRAD Level 2 Tests\n#\n\n# 1999 file tests old message 1\n# KFTG tests bzip compression and newer format for a part of message 31\n# KTLX 2015 has missing segments for message 18, which was causing exception\nlevel2_files = [('KTLX20130520_201643_V06.gz', datetime(2013, 5, 20, 20, 16, 46), 17),\n ('KTLX19990503_235621.gz', datetime(1999, 5, 3, 23, 56, 21), 16),\n ('Level2_KFTG_20150430_1419.ar2v', datetime(2015, 4, 30, 14, 19, 11), 12),\n ('KTLX20150530_000802_V06.bz2', datetime(2015, 5, 30, 0, 8, 3), 14),\n ('KICX_20170712_1458', datetime(2017, 7, 12, 14, 58, 5), 14)]\n\n\n# ids here fixes how things are presented in pycharm\[email protected]('fname, voltime, num_sweeps', level2_files,\n ids=[i[0].replace('.', '_') for i in level2_files])\ndef test_level2(fname, voltime, num_sweeps):\n \"\"\"Test reading NEXRAD level 2 files from the filename.\"\"\"\n f = Level2File(get_test_data(fname, as_file_obj=False))\n assert f.dt == voltime\n assert len(f.sweeps) == num_sweeps\n\n\ndef test_level2_fobj():\n \"\"\"Test reading NEXRAD level2 data from a file object.\"\"\"\n Level2File(get_test_data('Level2_KFTG_20150430_1419.ar2v'))\n\n\ndef test_doubled_file():\n \"\"\"Test for #489 where doubled-up files didn't parse at all.\"\"\"\n data = get_test_data('Level2_KFTG_20150430_1419.ar2v').read()\n fobj = BytesIO(data + data)\n f = Level2File(fobj)\n assert len(f.sweeps) == 12\n\n\n#\n# NIDS/Level 3 Tests\n#\nnexrad_nids_files = glob.glob(os.path.join(get_test_data('nids', as_file_obj=False), 'K???_*'))\n\n\[email protected]('fname', nexrad_nids_files)\ndef test_level3_files(fname):\n \"\"\"Test opening a NEXRAD NIDS file.\"\"\"\n f = Level3File(fname)\n\n # If we have some raster data in the symbology block, feed it into the mapper to make\n # sure it's working properly (Checks for #253)\n if hasattr(f, 'sym_block'):\n block = f.sym_block[0][0]\n if 'data' in block:\n f.map_data(block['data'])\n\n assert f.filename == fname\n\n\ntdwr_nids_files = glob.glob(os.path.join(get_test_data('nids', as_file_obj=False),\n 'Level3_MCI_*'))\n\n\[email protected]('fname', tdwr_nids_files)\ndef test_tdwr_nids(fname):\n \"\"\"Test opening a TDWR NIDS file.\"\"\"\n Level3File(fname)\n\n\ndef test_basic():\n \"\"\"Test reading one specific NEXRAD NIDS file based on the filename.\"\"\"\n f = Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids', as_file_obj=False))\n assert f.metadata['prod_time'].replace(second=0) == datetime(2014, 4, 7, 18, 5)\n assert f.metadata['vol_time'].replace(second=0) == datetime(2014, 4, 7, 18, 5)\n assert f.metadata['msg_time'].replace(second=0) == datetime(2014, 4, 7, 18, 6)\n assert f.filename == get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids',\n as_file_obj=False)\n\n # At this point, really just want to make sure that __str__ is able to run and produce\n # something not empty, the format is still up for grabs.\n assert str(f)\n\n\ndef test_tdwr():\n \"\"\"Test reading a specific TDWR file.\"\"\"\n f = 
Level3File(get_test_data('nids/Level3_SLC_TV0_20160516_2359.nids'))\n assert f.prod_desc.prod_code == 182\n\n\ndef test_dhr():\n \"\"\"Test reading a time field for DHR product.\"\"\"\n f = Level3File(get_test_data('nids/KOUN_SDUS54_DHRTLX_201305202016'))\n assert f.metadata['avg_time'] == datetime(2013, 5, 20, 20, 18)\n\n\ndef test_nwstg():\n \"\"\"Test reading a nids file pulled from the NWSTG.\"\"\"\n Level3File(get_test_data('nids/sn.last', as_file_obj=False))\n\n\ndef test_fobj():\n \"\"\"Test reading a specific NEXRAD NIDS files from a file object.\"\"\"\n Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids'))\n\n\ndef test21_precip():\n \"\"\"Test checking whether VCP 21 is precipitation mode.\"\"\"\n assert is_precip_mode(21), 'VCP 21 is precip'\n\n\ndef test11_precip():\n \"\"\"Test checking whether VCP 11 is precipitation mode.\"\"\"\n assert is_precip_mode(11), 'VCP 11 is precip'\n\n\ndef test31_clear_air():\n \"\"\"Test checking whether VCP 31 is clear air mode.\"\"\"\n assert not is_precip_mode(31), 'VCP 31 is not precip'\n\n\ndef test_msg15():\n \"\"\"Check proper decoding of message type 15.\"\"\"\n f = Level2File(get_test_data('KTLX20130520_201643_V06.gz', as_file_obj=False))\n data = f.clutter_filter_map['data']\n assert isinstance(data[0][0], list)\n assert f.clutter_filter_map['datetime'] == datetime(2013, 5, 19, 0, 0, 0, 315000)\n\n\ndef test_tracks():\n \"\"\"Check that tracks are properly decoded.\"\"\"\n f = Level3File(get_test_data('nids/KOUN_SDUS34_NSTTLX_201305202016'))\n for data in f.sym_block[0]:\n if 'track' in data:\n x, y = np.array(data['track']).T\n assert len(x)\n assert len(y)\n\n\ndef test_vector_packet():\n \"\"\"Check that vector packets are properly decoded.\"\"\"\n f = Level3File(get_test_data('nids/KOUN_SDUS64_NHITLX_201305202016'))\n for page in f.graph_pages:\n for item in page:\n if 'vectors' in item:\n x1, x2, y1, y2 = np.array(item['vectors']).T\n assert len(x1)\n assert len(x2)\n assert len(y1)\n assert len(y2)\n", "# Copyright (c) 2016,2017 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\nWind and Sea Level Pressure Interpolation\n=========================================\n\nInterpolate sea level pressure, as well as wind component data,\nto make a consistent looking analysis, featuring contours of pressure and wind barbs.\n\"\"\"\nimport cartopy\nimport cartopy.crs as ccrs\nfrom matplotlib.colors import BoundaryNorm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom metpy.calc import get_wind_components\nfrom metpy.cbook import get_test_data\nfrom metpy.gridding.gridding_functions import interpolate, remove_nan_observations\nfrom metpy.plots import add_metpy_logo\nfrom metpy.units import units\n\nto_proj = ccrs.AlbersEqualArea(central_longitude=-97., central_latitude=38.)\n\n###########################################\n# Read in data\nwith get_test_data('station_data.txt') as f:\n data = pd.read_csv(f, header=0, usecols=(2, 3, 4, 5, 18, 19),\n names=['latitude', 'longitude', 'slp', 'temperature', 'wind_dir',\n 'wind_speed'],\n na_values=-99999)\n\n###########################################\n# Project the lon/lat locations to our final projection\nlon = data['longitude'].values\nlat = data['latitude'].values\nxp, yp, _ = to_proj.transform_points(ccrs.Geodetic(), lon, lat).T\n\n###########################################\n# Remove all missing data from pressure\nx_masked, y_masked, pres = remove_nan_observations(xp, yp, 
data['slp'].values)\n\n###########################################\n# Interpolate pressure using Cressman interpolation\nslpgridx, slpgridy, slp = interpolate(x_masked, y_masked, pres, interp_type='cressman',\n minimum_neighbors=1, search_radius=400000, hres=100000)\n\n##########################################\n# Get wind information and mask where either speed or direction is unavailable\nwind_speed = (data['wind_speed'].values * units('m/s')).to('knots')\nwind_dir = data['wind_dir'].values * units.degree\n\ngood_indices = np.where((~np.isnan(wind_dir)) & (~np.isnan(wind_speed)))\n\nx_masked = xp[good_indices]\ny_masked = yp[good_indices]\nwind_speed = wind_speed[good_indices]\nwind_dir = wind_dir[good_indices]\n\n###########################################\n# Calculate u and v components of wind and then interpolate both.\n#\n# Both will have the same underlying grid so throw away grid returned from v interpolation.\nu, v = get_wind_components(wind_speed, wind_dir)\n\nwindgridx, windgridy, uwind = interpolate(x_masked, y_masked, np.array(u),\n interp_type='cressman', search_radius=400000,\n hres=100000)\n\n_, _, vwind = interpolate(x_masked, y_masked, np.array(v), interp_type='cressman',\n search_radius=400000, hres=100000)\n\n###########################################\n# Get temperature information\nx_masked, y_masked, t = remove_nan_observations(xp, yp, data['temperature'].values)\ntempx, tempy, temp = interpolate(x_masked, y_masked, t, interp_type='cressman',\n minimum_neighbors=3, search_radius=400000, hres=35000)\n\ntemp = np.ma.masked_where(np.isnan(temp), temp)\n\n###########################################\n# Set up the map and plot the interpolated grids appropriately.\nlevels = list(range(-20, 20, 1))\ncmap = plt.get_cmap('viridis')\nnorm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\nfig = plt.figure(figsize=(20, 10))\nadd_metpy_logo(fig, 360, 120, size='large')\nview = fig.add_subplot(1, 1, 1, projection=to_proj)\n\nview.set_extent([-120, -70, 20, 50])\nview.add_feature(cartopy.feature.NaturalEarthFeature(category='cultural',\n name='admin_1_states_provinces_lakes',\n scale='50m', facecolor='none'))\nview.add_feature(cartopy.feature.OCEAN)\nview.add_feature(cartopy.feature.COASTLINE)\nview.add_feature(cartopy.feature.BORDERS, linestyle=':')\n\ncs = view.contour(slpgridx, slpgridy, slp, colors='k', levels=list(range(990, 1034, 4)))\nplt.clabel(cs, inline=1, fontsize=12, fmt='%i')\n\nmmb = view.pcolormesh(tempx, tempy, temp, cmap=cmap, norm=norm)\nplt.colorbar(mmb, shrink=.4, pad=0.02, boundaries=levels)\n\nview.barbs(windgridx, windgridy, uwind, vwind, alpha=.4, length=5)\n\nplt.title('Surface Temperature (shaded), SLP, and Wind.')\n\nplt.show()\n" ]
[ [ "numpy.array" ], [ "matplotlib.colors.BoundaryNorm", "matplotlib.pyplot.clabel", "pandas.read_csv", "matplotlib.pyplot.title", "numpy.isnan", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.colorbar", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
Xinrihui/Statistical-Learning-Method
[ "2e1c107bed8a21307596fbe0f8f1daed638657c5" ]
[ "CRF/crf_xrh.py" ]
[ "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport time\n\nimport pickle\nimport codecs\nimport numpy as np\nfrom scipy import optimize\n\n# 将 warnings 也当做错误抛出\n# import warnings\n# warnings.filterwarnings('error')\n\n# 定义负无穷大\ninfinite = float('-inf')\n\nclass LinearCRF(object):\n \"\"\"\n\n 线性链条件随机场\n\n 1. 适用于中文分词任务, 参考 CRF++, 设计如下特征函数:\n\n (1) 状态特征, 一元特征(Unigram)\n ('U', pos, word, tag)\n\n (2) 转移特征, 二元特征(Bigram)\n ('B', pre_tag, tag)\n\n 2.实现了学习算法:\n (1)带正则的梯度上升算法\n (2) 拟牛顿法 L-BGFS ( 调用 scipy.optimize)\n\n 3.实现在已有的预训练的模型的基础上进行更进一步的训练(使用同一个数据集)\n\n 4.实现了 基于维特比算法的 解码\n\n 5.如果觉得 LinearCRF模型训练的慢, 可以读取 CRF++ 库训练得到的参数, 详见 类 CRFSegmentation\n\n\n ref:\n 1.《统计学习方法 第二版》李航\n 2. https://victorjiangxin.github.io/Chinese-Word-Segmentation/\n 3. https://www.cnblogs.com/Determined22/p/6915730.html\n\n Author: xrh\n Date: 2021-06-26\n\n \"\"\"\n def __init__(self):\n\n self.ntags = 4\n self.index_tag = {0:'B', 1:'I', 2:'E', 3:'S'} # 分词场景的 4种隐状态\n self.tag_index = {'B':0, 'I':1, 'E':2, 'S':3}\n\n self.tag_sets = [0, 1, 2, 3]\n\n self.start_tag = 'start' # 补充的句子开始状态\n self.end_tag = 'end' # 补充的句子结束状态\n\n\n self.U_feature_offset = [-2, -1, 0, 1, 2] # 模板中 向前和向后的偏移量\n # 若设置 self.U_feature_offset = [0] 则 在此实现中效果与HMM类似\n\n self.index_feature = {} # eg. {0 : ('U', 1, word, tag)}\n self.feature_index = {} # eg. { ('U', 1, word, tag) : 0 }\n\n self.feature_num = 0 # 特征的个数\n\n self.weights = np.zeros(self.feature_num) # 所有特征的权重\n\n self.theta = 1e-4 # theta should in the range of (1e-6 ~ 1e-3)\n\n\n\n def log_M_at(self,weights, sentence, pre_tag, tag, t):\n \"\"\"\n 计算 log M(yi_1, yi|x) = sum{k}: W_k*F_k( y_i-1, y_i|x)\n\n 依据公式 (11.22) (11.23)\n\n :param sentence: 一个句子词的词的列表\n :param pre_tag:\n :param tag:\n :param t:\n :return:\n \"\"\"\n # (type , offset , word=x[t+offset] , tag=y[t])\n # eg. ('U', 1, '向', 2)\n # 'U': 特征类型为 一元(Unigram);\n # 1: 偏移量, 当前时刻t 向后偏移1个时刻 ;\n # '向': 向后偏移1个时刻后的 x='向' ;\n # 2: y[t]=2, 当前时刻的标签, 2代表标签 'E'\n\n # (type,pre_tag,tag)\n # eg. 
('B',1,2)\n # 'B': 特征类型为 二元(Bigram);\n # 1: y[t-1]=1 前一个时刻为状态1\n # 2: y[t]=2 当前时刻为状态2\n\n n=len(sentence)-1\n\n selectd_feature=[] # 被选中的特征列表\n\n if pre_tag == self.start_tag: # 句子的开始\n\n # 一元特征\n for offset in self.U_feature_offset:\n\n pos = t + offset\n if pos >= 1 and pos < n+1: # pos 位置必须合法\n\n feature = ('U', offset, sentence[pos], tag)\n\n if feature in self.feature_index:\n selectd_feature.append( self.feature_index[feature] )\n\n\n elif tag == self.end_tag: # 句子的结束\n\n # 结果要log化, log(1)=0\n res=0\n\n return res\n\n else:\n\n # 一元特征\n for offset in self.U_feature_offset:\n\n pos = t + offset\n if pos >= 1 and pos < n+1: # pos 位置必须合法\n\n feature = ('U', offset, sentence[pos], tag)\n\n\n if feature in self.feature_index:\n selectd_feature.append( self.feature_index[feature] )\n\n # 二元特征\n feature = ('B', pre_tag, tag)\n if feature in self.feature_index:\n selectd_feature.append(self.feature_index[feature])\n\n # 结果要log化, log(exp()) 相当于啥也做, 因此直接求和\n res=np.sum(weights[selectd_feature])\n\n return res\n\n\n def calc_log_M(self, weights,sentence):\n \"\"\"\n 计算状态转移矩阵 M\n 返回 log() 化之后的概率\n\n 依据公式 (11.21) (11.22) (11.23)\n\n :param sentence:\n :return:\n \"\"\"\n n = len(sentence)-1\n\n log_M_list = np.ones((n+2,self.ntags,self.ntags))*infinite\n # 所有时刻的转移矩阵集合; shape: (L_sentence+2, ntags, ntags) 补充上开始时刻和结束时刻, 因此+2\n # 因为取log, M中为0 的元素变为 -inf\n\n for t in range(1,n+2): # t=1,2,.., n+1\n\n if t==1: # 开始\n for tag in self.tag_sets: # [0,1,2,3]\n log_M_list[t][0][tag] = self.log_M_at(weights, sentence, self.start_tag,tag, t)\n\n elif t==n+1: # 结束\n for pre_tag in self.tag_sets: # [0,1,2,3]\n log_M_list[t][pre_tag][0] = self.log_M_at( weights,sentence, pre_tag,self.end_tag, t)\n\n else:\n for pre_tag in self.tag_sets:\n for tag in self.tag_sets:\n log_M_list[t][pre_tag][tag] = self.log_M_at(weights,sentence, pre_tag, tag, t)\n\n return log_M_list\n\n def log_sum_exp(self, a, b):\n \"\"\"\n\n a = [a1, a2, a3]\n b = [b1, b2, b3]\n\n 计算 log(e^a1*e^b1+e^a2*e^b2+e^a3*e^b3)\n\n :param a:\n :param b:\n :return:\n \"\"\"\n\n try:\n c = a + b\n max_value = np.max(c)\n\n if max_value == float('-inf'):\n res=0\n else:\n res = max_value + np.log(np.sum(np.exp(c - max_value)))\n\n except Warning as e: # 捕捉到 Warning\n print(e) # debug 时 , 在此处打断点\n\n return res\n\n\n def calc_log_alpha(self, sentence, log_M=None):\n \"\"\"\n 依据公式 (11.26) (11.27) 前向算法计算 每一个时刻 t 的 alpha ;\n 返回 log() 化之后的概率\n\n :param sentence:\n :param M:\n :return:\n \"\"\"\n n = len(sentence)-1\n\n log_alpha_list = np.ones((n + 2, self.ntags))* float('-inf') # 所有时刻 alpha的集合; shape: (L_sentence+2, ntags)\n\n # 句子的开始\n t=0\n log_alpha_list[t] = np.zeros( self.ntags ) # t=0 alpha 初始化为1, 因为取log 变为0\n\n # 句子中间 和 结束\n for t in range(1, n + 2): # t=1,2,.., n+1\n\n for tag in self.tag_sets: # [0,1,2,3]\n\n log_alpha_list[t][tag] = self.log_sum_exp(log_alpha_list[t-1], log_M[t, : ,tag])\n\n # 句子结束\n # t = L_sentence + 1\n # alpha_list[t][0] = self.log_sum_exp(alpha_list[t-1], M[: ,0])\n\n return log_alpha_list\n\n def calc_log_beta(self, sentence, log_M=None):\n \"\"\"\n 依据公式 (11.29) (11.30) 后向算法计算 每一个时刻 t 的 beta ;\n 返回 log() 化之后的概率\n\n :param sentence:\n :param log_M:\n :return:\n \"\"\"\n n = len(sentence)-1\n\n log_beta_list = np.ones( (n + 2, self.ntags) ) * float('-inf') # 所有时刻的 beta集合; shape: (n+2, ntags)\n\n # 句子结束\n t = n + 1\n log_beta_list[t] = np.zeros(self.ntags) # t=0 beta 初始化为1, 因为取log 变为0\n\n # 句子中间 和 开始\n for t in range(n,-1,-1 ): # t= L_sentence,..,1,0\n\n for tag in self.tag_sets: # [0,1,2,3]\n\n log_beta_list[t][tag] = 
self.log_sum_exp(log_M[t+1,tag ,:],log_beta_list[t+1])\n\n return log_beta_list\n\n\n\n def calc_log_z(self, alpha):\n \"\"\"\n 依据 公式 (11.31) 下方公式 计算归一化因子 z\n 返回 log() 化之后的概率\n\n :param alpha:\n :return:\n \"\"\"\n return alpha[-1][0]\n\n\n def log_conditional(self, tag_list, log_M,log_z):\n \"\"\"\n 依据公式(11.24) 计算条件概率 p(y|x)\n\n log p(y|x) = log exp(sum(M)) - log Z(x)\n\n 返回 log() 化之后的概率\n\n :param tag_list:\n :param M:\n :return:\n \"\"\"\n n = len(tag_list)-1\n\n log_p=0\n\n for t in range(1, n + 2): # t=1,2,.., n+1\n\n # 开头\n if t==1:\n\n try:\n log_p += log_M[t][0][tag_list[t]] # 取log后 连续相乘变为连续相加\n except Exception as err:\n print(err) # debug 时 , 在此处打断点\n\n # 结尾\n elif t== n+1:\n try:\n log_p += log_M[t][tag_list[t-1]][0]\n except Exception as err:\n print(err) # debug 时 , 在此处打断点\n\n else:\n log_p += log_M[t][tag_list[t-1]][tag_list[t]]\n\n return log_p-log_z # 取log后 除法变减法\n\n def log_marginal(self, tag_list, log_M, log_alpha, log_beta, log_z):\n \"\"\"\n 依据 公式 (11.33) 计算 边缘概率\n log P(yi_1, yi|x) = log alpha(i-1, yi_1) + log M(i, yi_1, yi, x) + log beta(i, yi) - log z(x)\n\n 返回 log() 化之后的概率\n\n :param log_M:\n :param log_alpha:\n :param log_beta:\n :param log_z:\n :return:\n \"\"\"\n T = len(tag_list)-1\n\n log_p = np.zeros((T+2, self.ntags, self.ntags))\n\n for t in range(1, T + 1): # t=1,2,.., L_tag_list\n\n for pre_tag in self.tag_sets: # [0,1,2,3]\n for tag in self.tag_sets: # [0,1,2,3]\n\n log_p[t][pre_tag][tag] = log_alpha[t-1][pre_tag] + log_M[t][pre_tag][tag] + log_beta[t][tag] - log_z\n\n return log_p\n\n def calc_gradient_part2( self,sentence,tag_list, log_M, log_alpha, log_beta, log_z):\n \"\"\"\n 梯度计算 公式中的第二项\n\n sum{i,k}: P(y_i-1, yi|x)*F_k( y_i-1, y_i|x)\n\n :param tag_list:\n :param log_M:\n :param log_alpha:\n :param log_beta:\n :param log_z:\n :return:\n \"\"\"\n T = len(tag_list) - 1\n\n log_p_marginal=self.log_marginal(tag_list, log_M, log_alpha, log_beta, log_z)\n\n gradient_part2 = np.zeros(self.feature_num)\n\n p_marginal= np.exp(log_p_marginal)\n\n for t in range(1, T + 1): # t=1,2,.., T\n\n for pre_tag in self.tag_sets: # [0,1,2,3]\n for tag in self.tag_sets: # [0,1,2,3]\n\n selectd_feature = []\n\n # 一元特征\n for offset in self.U_feature_offset:\n\n pos = t + offset\n if pos >= 1 and pos < T+1: # pos 位置必须合法\n\n feature = ('U', offset, sentence[pos], tag)\n\n if feature in self.feature_index:\n selectd_feature.append(self.feature_index[feature])\n\n if t>1: # t>1 才会考虑 二元特征\n\n # 二元特征\n feature = ('B', pre_tag, tag)\n if feature in self.feature_index:\n selectd_feature.append(self.feature_index[feature])\n\n gradient_part2[selectd_feature] += p_marginal[t, pre_tag, tag]\n\n return gradient_part2\n\n def neg_likelihood_and_gradient(self, weights, feature_count, sentence_list):\n \"\"\"\n 计算 损失函数(极大化 对数似然) 和 损失函数的梯度\n\n 返回 (-损失函数) , (-损失函数的梯度)\n\n :param weights:\n :param feature_count:\n :param sentence_list:\n :return:\n \"\"\"\n self.weights = weights # TODO: 使用 optimize.fmin_l_bfgs_b() 需要加上此行, self.weights 才会更新\n\n likelihood = 0\n gradient_part2 = np.zeros(self.feature_num)\n\n for sentence,tag_list in sentence_list: # 遍历语料库的所有句子\n\n sentence = [''] + sentence # sentence 往后偏移一位\n tag_list = [-1] + tag_list # tag_list 往后偏移一位\n\n # 对于每一个句子, 分别计算 M, alpha, beta, z\n log_M = self.calc_log_M(weights,sentence)\n\n log_alpha = self.calc_log_alpha(sentence,log_M)\n log_beta = self.calc_log_beta(sentence,log_M)\n log_z = self.calc_log_z(log_alpha)\n\n likelihood += self.log_conditional(tag_list,log_M,log_z) # 条件概率 即为损失函数的 经验损失部分\n\n gradient_part2 += 
self.calc_gradient_part2( sentence,tag_list, log_M, log_alpha, log_beta, log_z) # 梯度计算 公式中的第二项\n\n # 加入正则化项\n likelihood = likelihood - np.dot(weights, weights) * self.theta / 2\n\n gradient_part1 = feature_count # 梯度计算公式中 第一项为: 对于该语料,每一特征出现的次数。\n\n gradient = gradient_part1 - gradient_part2 - (weights * self.theta)\n\n return -likelihood, -gradient\n\n\n def decode(self, sentence):\n \"\"\"\n 给定观测序列 X, 计算得到 P(Y|X) 取得最大值的 隐状态序列 Y\n\n 利用维特比算法, 根据 公式 (11.54) - (11.59)\n\n :param sentence:\n :return:\n \"\"\"\n # sentence = [''] + list(sentence) # sentence 往后偏移一位\n\n sentence = list(sentence)\n\n tag_list =[]\n\n n = len(sentence)\n\n # 记录前一个时刻的 delta\n pre_dp = [0]*self.ntags\n\n # 当前时刻的状态j 是从上一时刻的状态i 转移而来的, 通过pre_state可以还原出最佳路径\n # pre_state = np.zeros((n+2,self.ntags))\n\n pre_state = [[0 for j in range(self.ntags)] for i in range(n)]\n\n # 初始时刻 t=0\n t=0\n for tag in self.tag_sets:\n\n selectd_feature = [] # 被选中的特征列表\n\n # 一元特征\n for offset in self.U_feature_offset:\n\n pos = t + offset\n if pos >= 0 and pos < n : # pos 位置必须合法\n\n # try:\n\n feature = ('U', offset, sentence[pos], tag)\n\n # except Exception as err:\n # print(err) # debug 时 , 在此处打断点\n\n if feature in self.feature_index:\n selectd_feature.append(self.feature_index[feature])\n\n pre_dp[tag] = np.sum( self.weights[selectd_feature] )\n\n for t in range(1,n): # t=1,...,n-1\n\n dp = [0]*self.ntags\n\n for tag in self.tag_sets:\n\n selectd_feature = [] # 被选中的特征列表\n\n # 一元特征 只和当前tag 有关\n for offset in self.U_feature_offset:\n\n pos = t + offset\n if pos >= 0 and pos < n : # pos 位置必须合法\n\n feature = ('U', offset, sentence[pos], tag)\n\n if feature in self.feature_index:\n selectd_feature.append(self.feature_index[feature])\n\n delta_U = np.sum( self.weights[selectd_feature] )\n\n max_B_delta= float('-inf')\n max_B_delta_index = 0\n\n for pre_tag in self.tag_sets:\n\n selectd_feature = [] # 被选中的特征列表\n\n # 二元特征\n feature = ('B', pre_tag, tag)\n if feature in self.feature_index:\n selectd_feature.append(self.feature_index[feature])\n\n B_delta = np.sum( self.weights[selectd_feature] ) + pre_dp[pre_tag]\n\n if B_delta > max_B_delta:\n max_B_delta = B_delta\n max_B_delta_index = pre_tag\n\n dp[tag] = delta_U + max_B_delta\n pre_state[t][tag] = max_B_delta_index\n\n pre_dp = dp\n\n # 最后时刻 T\n state = np.argmax(pre_dp)\n tag_list.append(self.index_tag[state])\n\n # 时刻 T-1,...,1\n for t in range(n-1, 0, -1):\n state = pre_state[t][state]\n tag_list.append(self.index_tag[state])\n\n tag_list.reverse()\n\n return tag_list\n\n\n\n def get_train_data(self,train_data_dir):\n \"\"\"\n 准备训练数据\n\n 分词数据集 pku_training.2col 的内容为:\n\n 迈\tB\n 向\tE\n 充\tB\n 满\tE\n\n 我们将其转换为\n sentence = ['迈','向','充', '满 ']\n tag = [B,E,B,E]\n\n :param train_data_dir:\n :return:\n \"\"\"\n dataset = []\n sentence = [] # 句子\n label = [] # 句子中每一个字的标注\n\n word_dict = set() # 记录语料库中的字的种类\n tag_dict = set() # 记录语料库中的标签的种类\n\n print('loading dateset ,dir: {}'.format(train_data_dir))\n\n with open(train_data_dir, encoding='utf-8') as f:\n\n for line in f.readlines():\n # 读到的每行最后都有一个\\n,使用strip将最后的回车符去掉\n line = line.strip()\n\n line_arr = line.split('\\t') # 分隔符为 '\\t'\n\n if len(line_arr) > 1: # 说明还在一个句子中\n sentence.append(line_arr[0])\n label.append(self.tag_index[line_arr[1]])\n\n word_dict.add(line_arr[0])\n tag_dict.add(line_arr[1])\n\n else: # 说明此句子已经结束\n dataset.append((sentence, label))\n sentence = []\n label = []\n\n print('sentence Num: {}'.format(len(dataset))) # pku 语料库中的句子个数 19056\n\n return dataset,word_dict,tag_dict\n\n def build_feature_set(self,dataset, 
word_dict):\n \"\"\"\n 建立条件随机场的分词特征\n\n 1.状态特征, 简化为与时间 t无关:\n (type , offset , word=x[t+offset] , tag=y[t])\n\n x y\n 迈\tB\n 向\tE <-t\n 充\tB\n 满\tE\n\n index_tag: {0:'B', 1:'I', 2:'E', 3:'S'}\n\n eg.\n\n ('U', 0, '向', 2)\n 'U': 特征类型为 一元 (Unigram);\n 0 : 当前时刻 t ;\n '向': 当前时刻 x[t]='向' ;\n 2: y[t]=2, 当前时刻的标签, 2代表标签 'E'\n\n ('U', -1, '迈', 2)\n 'U': 特征类型为 一元 (Unigram);\n -1 : 偏移量, 当前时刻 t 向前偏移1个时刻 ;\n '迈': 向前偏移1个时刻后的 x[t-1]='迈' ;\n 2: y[t]=2, 当前时刻的标签, 2代表标签 'E'\n\n ('U', 1, '充', 2)\n 'U': 特征类型为 一元 (Unigram);\n 1 : 偏移量, 当前时刻 t 向后偏移1个时刻 ;\n '充': 向后偏移1个时刻后的 x[t+1]='充' ;\n 2: y[t]=2, 当前时刻的标签, 2代表标签 'E'\n\n 上述例子表明: t 时刻的隐藏状态 y[t] 不仅仅和 t时刻的观测 x[t]有关,\n 还和 t-1 时刻的观测 x[t-1] 与 t+1 时刻的观测 x[t+1] 有关\n\n 2.转移特征, 简化为与 时间 t 和 观测 x 无关\n\n (type,pre_tag=y[t-1],tag=y[t])\n\n eg. ('B',0,2)\n 'B': 特征类型为 二元 (Bigram);\n 1: y[t-1]=0 前一个时刻为 状态0 ,0代表标签 'B'\n 2: y[t]=2 当前时刻为 状态2 ,2代表标签 'E'\n\n 3.特征的总数量为 Ucount×Vword×m + m×m\n\n (1) 状态特征个数为 Ucount×Vword×m\n 其中 Ucount表示 U特征提取的位置数目,比如只提取 当前文字前1个,当前文字,当前文字后一个,则此时 Ucount=3,\n Vword为语料库中文字数量,m表示 标签数。\n\n (2) 转移特征个数为 m×m\n 二元特征表达了 前一个标签到当前标签的转移概率, 标签个数为m , 一共有 mxm 种转移\n\n\n :param dataset:\n :param word_dict:\n :return:\n \"\"\"\n print('building the feature set...')\n\n feature_idx = 0\n index_feature = {} # eg. {0 : ('U', 1, word, tag)}\n feature_index = {} # eg. { ('U', 1, word, tag) : 0 }\n\n # 1. 构建状态特征\n\n # for word in word_dict:\n # for offset in self.U_feature_offset: # [-2, -1, 0, 1, 2]\n # for tag in self.tag_sets: # [0,1,2,3] -> {'B':0, 'I':1, 'E':2, 'S':3}\n #\n # feature=('U', offset, word, tag)\n # index_feature[feature_idx]=feature\n # feature_index[feature]=feature_idx\n # feature_idx+=1\n\n for sentence, tag_list in dataset: # 遍历所有的句子\n\n n = len(sentence)\n\n for t in range(n): # 遍历整个句子\n for offset in self.U_feature_offset: # [-2, -1, 0, 1, 2]\n for tag in self.tag_sets: # [0, 1, 2, 3]\n\n pos = t + offset\n if pos >= 0 and pos < n: # pos 位置必须合法\n\n feature = ('U', offset, sentence[pos], tag)\n\n if feature not in feature_index:\n feature_index[feature] = feature_idx\n index_feature[feature_idx] = feature\n feature_idx += 1\n\n # 2. 
构建转移特征\n for pre_tag in self.tag_sets: # [0, 1, 2, 3]\n for tag in self.tag_sets:\n feature = ('B', pre_tag, tag)\n index_feature[feature_idx] = feature\n feature_index[feature] = feature_idx\n feature_idx += 1\n\n self.feature_num = len(feature_index) # 特征的个数 93976\n print('feature_num :{}'.format(self.feature_num))\n\n return feature_index,index_feature\n\n def statistic_feature_in_dataset(self, dataset):\n \"\"\"\n 统计所有特征在语料库中的出现次数\n\n :param dataset:\n :return:\n \"\"\"\n\n feature_count = np.zeros(self.feature_num) # 特征的出现次数\n\n for sentence, tag_list in dataset: # 遍历语料库的所有句子\n\n for t in range(len(sentence)):\n\n # 状态特征\n # (type , offset , x[t+offset] , y[t])\n tag = tag_list[t]\n\n for offset in self.U_feature_offset: # [-2, -1, 0, 1, 2]\n\n pos = t + offset\n if pos >= 0 and pos < len(sentence): # pos 位置必须合法\n\n feature = ('U', offset, sentence[pos], tag)\n\n # try:\n feature_count[self.feature_index[feature]] += 1\n # except Exception as err:\n # print(err) # debug 时 , 在此处打断点\n\n # 转移特征\n # (type,pre_tag,tag)\n if t - 1 >= 0:\n pre_tag = tag_list[t - 1]\n feature = ('B', pre_tag, tag)\n feature_count[self.feature_index[feature]] += 1\n\n return feature_count\n\n def batch_gradient_ascent(self,feature_weights,feature_count, dataset,max_iter,learning_rate=0.01):\n \"\"\"\n 批量梯度上升(带正则项) 优化算法\n\n ref:\n https://victorjiangxin.github.io/Chinese-Word-Segmentation/\n\n :param feature_weights:\n :param feature_count:\n :param dataset:\n :param max_iter:\n :param learning_rate:\n :return:\n \"\"\"\n for epcho in range(max_iter):\n\n start_time = time.time()\n\n likelihood, gradient = self.neg_likelihood_and_gradient(feature_weights, feature_count, dataset)\n\n feature_weights -= learning_rate * gradient # 更新权重, gradient 为负, 这里负负为正\n\n print('epcho:{} loss: {}'.format(epcho, likelihood / 1000))\n\n print(\"Training one epcho time:{}s\".format(time.time() - start_time))\n\n\n def L_BFGS(self,feature_weights,feature_count, dataset,max_iter):\n \"\"\"\n 拟牛顿法 L-BFGS\n\n ref:\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html\n https://github.com/VictorJiangXin/Linear-CRF/blob/master/src/crf.py\n\n\n :param feature_weights:\n :param feature_count:\n :param dataset:\n :param max_iter:\n :param learning_rate:\n :return:\n \"\"\"\n start_time = time.time()\n\n func = lambda weights: self.neg_likelihood_and_gradient(weights, feature_count, dataset)\n\n res = optimize.fmin_l_bfgs_b(func, x0=feature_weights, iprint=0, disp=1, maxiter=max_iter, maxls=100)\n #TODO: def fmin_l_bfgs_b -> def _minimize_lbfgsb 中\n # task_str = task.tostring() 会导致报错, 按照提示将其改为\n # task_str = task.tobytes() 原因未知\n\n print(\"Training time:{}s\".format(time.time() - start_time))\n\n return\n\n def fit(self, train_data_dir, max_iter,learning_rate=0.01, pre_train_model=None):\n \"\"\"\n 模型训练 主流程\n\n :param train_data_dir:\n :param max_iter: 迭代次数\n :param learning_rate: 梯度上升的学习率\n :return:\n \"\"\"\n\n if pre_train_model == None:\n\n dataset, word_dict, tag_dict = self.get_train_data(train_data_dir)\n\n assert tag_dict == self.tag_index.keys() # 语料库中的标签的种类 需要与预设的一致\n\n self.feature_index,self.index_feature=self.build_feature_set(dataset, word_dict)\n\n feature_count=self.statistic_feature_in_dataset(dataset)\n\n feature_weights = np.random.randn(self.feature_num) # 所有特征的权重初始化\n\n else: # 在预训练模型的基础上 进行训练\n\n self.load(pre_train_model)\n\n feature_weights = self.weights\n\n dataset, word_dict, tag_dict = self.get_train_data(train_data_dir)\n\n feature_count = 
self.statistic_feature_in_dataset(dataset)\n\n\n print(\"Start training...\")\n\n\n # 可以看出 普通的梯度上升算法收敛地很慢, 迭代次数 max_iter=100 才会收敛\n # self.weights = self.batch_gradient_ascent(feature_weights,feature_count, dataset,max_iter,learning_rate)\n\n self.L_BFGS(feature_weights,feature_count, dataset,max_iter=max_iter)\n\n self.save(train_data_dir) # 保存训练完的模型\n\n\n def save(self, train_data_dir):\n \"\"\"\n 保存训练好的 CRF 模型\n\n :param train_data_dir:\n :return:\n \"\"\"\n model_dir = 'model/'\n model_file_name = train_data_dir.split('/')[-1] + '.model' # 取数据集的名字作为模型的名字\n\n save_dict = {}\n save_dict['ntags'] = self.ntags\n save_dict['index_tag'] = self.index_tag\n save_dict['tag_index'] = self.tag_index\n save_dict['feature_index'] = self.feature_index\n save_dict['index_feature'] = self.index_feature\n save_dict['feature_num'] = self.feature_num\n save_dict['weights'] = self.weights\n with open(model_dir+model_file_name, 'wb') as f:\n pickle.dump(save_dict, f)\n\n print(\"Save model successful!\")\n\n\n def load(self, file_path):\n \"\"\"\n 读取预训练的 CRF 模型\n\n :param file_path:\n :return:\n \"\"\"\n\n with open(file_path, 'rb') as f:\n save_dict = pickle.load(f)\n\n self.ntags = save_dict['ntags']\n self.index_tag = save_dict['index_tag']\n self.tag_index = save_dict['tag_index']\n self.feature_index = save_dict['feature_index']\n self.index_feature = save_dict['index_feature']\n self.feature_num = save_dict['feature_num']\n self.weights = save_dict['weights']\n\n print(\"Load model successful!\")\n\n\n def load_crfpp_model(self, model_path):\n \"\"\"\n 导入通过 crf++(crfcpp) 包训练的模型\n\n :param model_path:\n :return:\n \"\"\"\n\n with open(model_path, 'r',encoding='utf-8') as f:\n lines = f.readlines()\n\n tags_id = 0\n\n i = 0\n # print plus information\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n print(line)\n i += 1\n\n i += 1\n # get tags\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n self.tag_index[line] = tags_id\n self.index_tag[tags_id] = line\n tags_id += 1\n i += 1\n\n self.ntags = len(self.tag_index)\n print(self.tag_index)\n\n i += 1\n # map\n feature_map = {} # {'U00', -2}\n self.U_feature_offset = []\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n if line != 'B':\n feature_template = line.split(':')[0]\n pos = line.split('[')[1].split(',')[0]\n feature_map[feature_template] = int(pos)\n self.U_feature_offset.append(int(pos))\n i += 1\n print('self.U_feature_offset', self.U_feature_offset)\n print('feature_map:', feature_map)\n\n i += 1\n\n # construct feature\n feature_id = 0\n feature_id_weight_index = {} # in model.txt weight are not in\n while i < len(lines) and lines[i] != '\\n':\n weight_index = int(lines[i].strip().split()[0])\n line = lines[i].strip().split()[1]\n if line == 'B':\n for tag_pre in range(self.ntags):\n for tag_now in range(self.ntags):\n feature = ('B', tag_pre, tag_now)\n self.feature_index[feature] = feature_id\n self.index_feature[feature_id] = feature\n feature_id_weight_index[feature_id] = weight_index\n weight_index += 1\n feature_id += 1\n else:\n feature_template = line.split(':')[0]\n word = line.split(':')[1]\n pos = feature_map[feature_template]\n for tag in range(self.ntags):\n feature = ('U', pos, word, tag)\n self.feature_index[feature] = feature_id\n self.index_feature[feature_id] = feature\n feature_id_weight_index[feature_id] = weight_index\n weight_index += 1\n feature_id += 1\n i += 1\n\n print('Total features:', len(self.feature_index))\n i += 1\n # read weights\n 
self.feature_num = len(self.feature_index)\n self.weights = np.zeros(self.feature_num)\n\n weights_in_file = []\n\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n weights_in_file.append(float(line))\n i += 1\n\n for feature_id in feature_id_weight_index:\n\n # try:\n self.weights[feature_id] = weights_in_file[feature_id_weight_index[feature_id]]\n\n # except Exception as err:\n # print(err) # debug 时 , 在此处打断点\n\n print('Record weights = ', feature_id)\n\n # print(\"The last feature is {}, it's weight is {}\".format(\n # self.index_feature[feature_id-1], self.weights[feature_id-1]))\n\n print(\"Load crfcpp model successful!\")\n\nclass Report:\n\n def compare_line(self, reference, candidate):\n \"\"\"\n 输出标准标注结果 与 模型预测结果的重合部分的长度, 依据此可以计算出 P,R F1 值\n\n eg.\n\n 标准分词 A:['结婚',' 的',' 和',' 尚未',' 结婚 ','的']\n\n 标准区间 A:[1,2],[3,3],[4,4],[5,6],[7,8],[9,9] 一共 6个区间\n\n 分词结果 B:['结婚',' 的','和尚','未结婚 ','的 ']\n\n 分词区间 B:[1,2],[3,3],[4,5],[6,7,8],[9,9] 一共 5个区间\n\n A 和 B 的相同区间为 [1,2],[3,3],[9,9] 一共 3个区间\n\n ref_words_len = 6\n can_words_len = 5\n acc_word_len = 3\n\n Precision = 3/5\n Recall = 3/6\n\n ref:\n https://zhuanlan.zhihu.com/p/100552669\n\n :param reference: 标注的分词结果\n :param candidate: 模型预测的分词结果\n :return:\n \"\"\"\n\n\n ref_words = reference.split()\n can_words = candidate.split()\n\n ref_words_len = len(ref_words)\n can_words_len = len(can_words)\n\n ref_index = []\n index = 0\n for word in ref_words:\n word_index = [index]\n index += len(word)\n word_index.append(index)\n ref_index.append(word_index)\n\n can_index = []\n index = 0\n for word in can_words:\n word_index = [index]\n index += len(word)\n word_index.append(index)\n can_index.append(word_index)\n\n tmp = [val for val in ref_index if val in can_index]\n acc_word_len = len(tmp)\n\n return ref_words_len, can_words_len, acc_word_len\n\nclass CRFSegmentation(object):\n \"\"\"\n 基于 CRF 的分词器, 功能包括:\n\n 1.可以选择导入 LinearCRF 模型训练得到的参数, 还是 通过 CRF++ 库训练得到的参数\n 2.对单个句子分词\n 3.对整个文档分词并输出\n 4.将分词后的结果与标注文档比较, 输出文档分词的分数\n\n Author: xrh\n Date: 2021-06-30\n\n \"\"\"\n\n def __init__(self, model_path='lib/model/pku_model.txt',use_crfcpp=True):\n \"\"\"\n\n :param model_path:\n :param use_crfcpp: 是否使用 CRF++ 训练的模型\n \"\"\"\n\n self.crf = LinearCRF()\n\n if use_crfcpp:\n\n print('loading the crfcpp model, dir: {}'.format(model_path))\n\n self.crf.load_crfpp_model(model_path)\n\n else:\n\n print('loading model, dir: {}'.format(model_path))\n\n self.crf.load(model_path)\n\n def cut(self, sentence):\n \"\"\"\n 对句子进行分词, 词与词之间使用 空格间隔\n\n :param sentence:\n :return:\n \"\"\"\n\n sentence=sentence.strip()\n\n tag_list = self.crf.decode(sentence)\n\n out_str = ''\n\n for i in range(len(tag_list)):\n\n try:\n out_str = out_str + sentence[i]\n\n except Exception as err:\n print(err) # debug 时 , 在此处打断点\n\n if tag_list[i] in {'E', 'S'}: # 词语的结束标志\n out_str = out_str + ' '\n\n return out_str\n\n\n def cut_doc(self, in_dir, out_dir):\n \"\"\"\n 对整个文档进行分词并输出\n\n :param in_dir:\n :param out_dir:\n :return:\n \"\"\"\n # 初始化 输出\n output_lines = []\n\n with open(in_dir, encoding='utf-8') as f:\n # 按行读取文件\n for line in f.readlines():\n # 读到的每行最后都有一个\\n,使用strip将最后的回车符去掉\n line = line.strip()\n # 将该行放入文章列表中\n output_lines.append(self.cut(line)+'\\n')\n\n\n # 设置定长缓冲区\n with open(out_dir, 'w+', encoding='utf-8', buffering=200) as f:\n f.writelines(output_lines)\n\n def score_cut_doc(self, in_file,ref_file, out_file):\n \"\"\"\n 对原始文档进行分词, 并对分词结果进行评价\n 评价指标包括 precison, recall, f1\n\n ref:\n https://zhuanlan.zhihu.com/p/100552669\n\n :param in_file:原始文档(未分词)\n 
:param ref_file: 标注文档\n :param out_file: 分词后的结果文档\n :return:\n \"\"\"\n\n self.cut_doc(in_file,out_file)\n\n report = Report()\n\n fref = open(ref_file, 'r', encoding='utf8')\n fcan = open(out_file, 'r', encoding='utf8')\n\n reference_all = fref.readlines()\n candidate_all = fcan.readlines()\n fref.close()\n fcan.close()\n\n ref_count = 0\n can_count = 0\n acc_count = 0\n for reference, candidate in zip(reference_all, candidate_all):\n reference = reference.strip()\n candidate = candidate.strip()\n\n ref_words_len, can_words_len, acc_word_len = report.compare_line(reference, candidate)\n ref_count += ref_words_len\n can_count += can_words_len\n acc_count += acc_word_len\n\n P = acc_count / can_count\n R = acc_count / ref_count\n F1 = (2 * P * R) / (P + R)\n\n print('Precision:', P)\n print('Recall:', R)\n print('F1:', F1)\n\n\n\nclass Test:\n\n def train_model(self):\n\n np.random.seed(0) # 设置随机数 种子\n\n crf = LinearCRF()\n # crf.fit('../dataset/ChineseCutWord/pku_training.2col.tiny',max_iter=10,learning_rate=0.01)\n\n # crf.fit('../dataset/ChineseCutWord/pku_training.2col.small', max_iter=100, learning_rate=0.01) # Training time:40111s\n\n crf.fit('../dataset/ChineseCutWord/pku_training.2col', max_iter=20, learning_rate=0.01,pre_train_model='model/pku_training.2col.model') # 原始数据集跑的太慢了\n\n\n def test_cut_sentence(self):\n\n seg = CRFSegmentation('model/crf_pku_small.model',use_crfcpp=False)\n\n # seg = CRFSegmentation()\n\n sen1 = '今晚的月色真美呀!'\n sen2 = '生命在于奋斗!'\n sen3 = '小哥哥,别复习了,来玩吧!'\n\n print('测试句:', sen1)\n print('分词后:', seg.cut(sen1))\n\n print('测试句:', sen2)\n print('分词后:', seg.cut(sen2))\n\n print('测试句:', sen3)\n print('分词后:', seg.cut(sen3))\n\n def test_cut_doc_and_eval(self):\n\n # seg = CRFSegmentation()\n\n seg = CRFSegmentation('model/pku_training.2col.model',use_crfcpp=False) # F1: 0.46\n\n # 使用 微博的测试数据\n seg.score_cut_doc(in_file='test/data/test_weibo.txt',\n ref_file='test/data/weibo.txt',\n out_file='test/result/weibo_crf_result.txt')\n\n # 使用 PKU的测试数据\n # seg.score_cut_doc(in_file='test/data/test_pku.txt',\n # ref_file='test/data/pku.txt',\n # out_file='test/result/pku_crf_result.txt')\n\n\nif __name__ == '__main__':\n\n test = Test()\n\n # test.train_model()\n\n # test.test_cut_sentence()\n\n test.test_cut_doc_and_eval()\n\n\n\n\n\n" ]
[ [ "numpy.dot", "numpy.random.seed", "scipy.optimize.fmin_l_bfgs_b", "numpy.ones", "numpy.max", "numpy.argmax", "numpy.random.randn", "numpy.exp", "numpy.zeros", "numpy.sum" ] ]
mariogeiger/gradientflow
[ "16749d538ca1caf3052ca50bfbdbabb61bd8e3a1" ]
[ "gradientflow/_backprop.py" ]
[ "# pylint: disable=no-member, invalid-name, line-too-long\n\"\"\"\nGradient flow for any model using pytorch backprop\n\"\"\"\nimport copy\nimport math\n\nimport torch\n\nfrom ._flow import flow\nfrom ._gradient import gradient\n\n\nclass _ContinuousMomentum(torch.optim.Optimizer):\n r\"\"\"Implements a continuous version of momentum.\n\n d/dt velocity = -1/tau (velocity + grad)\n or\n d/dt velocity = -mu/t (velocity + grad)\n\n d/dt parameters = velocity\n \"\"\"\n\n def __init__(self, params, dt, tau):\n defaults = dict(dt=dt, tau=tau)\n super().__init__(params, defaults)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable): A closure that reevaluates the model and\n returns the loss. Optional for most optimizers.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n tau = group['tau']\n dt = group['dt']\n\n for p in group['params']:\n if p.grad is None:\n continue\n\n param_state = self.state[p]\n if 't' not in param_state:\n t = param_state['t'] = 0\n else:\n t = param_state['t']\n\n v = None\n if tau != 0:\n if 'velocity' not in param_state:\n v = param_state['velocity'] = torch.zeros_like(p.data)\n else:\n v = param_state['velocity']\n\n if tau > 0:\n x = math.exp(-dt / tau)\n v.mul_(x).add_(-(1 - x) * p.grad.data)\n elif tau < 0:\n mu = -tau\n x = (t / (t + dt)) ** mu\n v.mul_(x).add_(-(1 - x) * p.grad.data)\n else:\n v = -p.grad.data\n\n p.data.add_(dt * v)\n param_state['t'] += dt\n\n return loss\n\n\ndef _make_step(f, optimizer, dt, grad):\n \"\"\"\n internal function\n \"\"\"\n i = 0\n for p in f.parameters():\n n = p.numel()\n p.grad = grad[i: i + n].view_as(p)\n i += n\n\n for param_group in optimizer.param_groups:\n param_group['dt'] = dt\n\n optimizer.step()\n\n for p in f.parameters():\n p.grad = None\n\n\ndef _getitems(x, ii):\n if isinstance(x, list):\n return [x[i.item()] for i in ii]\n try:\n return x[ii]\n except TypeError:\n return [x[i.item()] for i in ii]\n\n\ndef _output_gradient(f, loss, x, y, out0, chunk):\n \"\"\"\n internal function\n \"\"\"\n out = []\n grad = 0\n loss_value = 0\n for i in [slice(i, i + chunk) for i in range(0, len(x), chunk)]:\n o = f(x[i]) - out0[i]\n l = loss(o, y[i])\n assert l.shape == (len(o),)\n l = l.sum() / len(x)\n grad += gradient(l, f.parameters())\n out.append(o)\n loss_value += l.item()\n return torch.cat(out), grad, loss_value\n\n\ndef gradientflow_backprop(f0, x, y, loss, subf0=False, tau=0, chunk=None, batch=None, max_dgrad=1e-3, max_dout=1, checkpoints=None):\n \"\"\"\n gradientflow on a torch.nn.Model using backprop\n :param f0: torch.nn.Model\n :param x: torch.Tensor [z, ...]\n :param y: torch.Tensor [z, ...]\n :param loss: function: outputs, labels -> losses\n \"\"\"\n\n if chunk is None:\n chunk = len(x)\n\n if batch is None:\n batch = len(x)\n\n f = copy.deepcopy(f0)\n\n with torch.no_grad():\n with torch.no_grad():\n out0 = []\n for i in [slice(i, i + chunk) for i in range(0, len(x), chunk)]:\n out0.append(f0(x[i]))\n out0 = torch.cat(out0)\n if isinstance(subf0, bool):\n if not subf0:\n out0 = torch.zeros_like(out0)\n else:\n assert out0.shape == subf0.shape\n out0 = subf0\n\n def prepare(sta, t, old_data, old_t):\n sta = copy.deepcopy(sta)\n ff = copy.deepcopy(f)\n ff.load_state_dict(sta[0])\n\n if old_data is not None:\n if old_t == t:\n if batch == len(x):\n return old_data\n bi = torch.randperm(len(x))[:batch].sort().values\n else:\n bi = old_data[3]\n else:\n bi = 
torch.randperm(len(x))[:batch].sort().values\n\n out, grad, loss_value = _output_gradient(ff, loss, _getitems(x, bi), _getitems(y, bi), out0[bi], chunk)\n\n return out, grad, loss_value, bi\n\n def make_step(sta, data, _t, dt):\n sta = copy.deepcopy(sta)\n ff = copy.deepcopy(f)\n optimizer = _ContinuousMomentum(ff.parameters(), dt=0, tau=tau)\n\n ff.load_state_dict(sta[0])\n optimizer.load_state_dict(sta[1])\n _out, grad, _loss_value, _bi = data\n\n _make_step(ff, optimizer, dt, grad)\n return ff.state_dict(), optimizer.state_dict()\n\n def compare(data, new_data):\n out, grad, _loss_value, bi = data\n new_out, new_grad, _new_loss_value, new_bi = new_data\n\n assert bi.eq(new_bi).all()\n\n dout = (out - new_out).abs().max().item()\n if grad.norm() == 0 or new_grad.norm() == 0:\n dgrad = 0\n else:\n dgrad = (grad - new_grad).norm().pow(2).div(grad.norm() * new_grad.norm()).item()\n\n return dgrad / max_dgrad, dout / max_dout\n\n opt = _ContinuousMomentum(f.parameters(), dt=0, tau=tau)\n\n for state, internals in flow((f.state_dict(), opt.state_dict()), prepare, make_step, compare, checkpoints=checkpoints):\n out, grad, loss_value, bi = internals['data']\n f.load_state_dict(internals['x'][0])\n\n state = {\n 'step': state['step'],\n 't': state['t'],\n 'loss': loss_value,\n 'dt': state['dt'],\n 'dgrad': max_dgrad * state['d'][0] if state['d'] is not None else 0,\n 'dout': max_dout * state['d'][1] if state['d'] is not None else 0,\n }\n internals = {\n 'f': f,\n 'output': out,\n 'output0': out0[bi],\n 'gradient': grad,\n 'batch_indices': bi,\n 'changed_dt': internals['changed_dt']\n }\n yield state, internals\n" ]
[ [ "torch.zeros_like", "torch.no_grad", "torch.cat" ] ]
tund/TensorFlowASR
[ "a9b085095b2335ebbdaa2e78601b57073224f804", "a9b085095b2335ebbdaa2e78601b57073224f804" ]
[ "tensorflow_asr/models/ctc.py", "tensorflow_asr/utils/__init__.py" ]
[ "# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom . import Model\nfrom ..featurizers.speech_featurizers import TFSpeechFeaturizer\nfrom ..featurizers.text_featurizers import TextFeaturizer\nfrom ..utils.utils import shape_list\n\n\nclass CtcModel(Model):\n def __init__(self, **kwargs):\n super(CtcModel, self).__init__(**kwargs)\n\n def _build(self, input_shape):\n features = tf.keras.Input(input_shape, dtype=tf.float32)\n self(features, training=False)\n\n def add_featurizers(self,\n speech_featurizer: TFSpeechFeaturizer,\n text_featurizer: TextFeaturizer):\n self.speech_featurizer = speech_featurizer\n self.text_featurizer = text_featurizer\n\n def call(self, inputs, training=False):\n raise NotImplementedError()\n\n # -------------------------------- GREEDY -------------------------------------\n\n @tf.function\n def recognize(self, signals):\n\n def extract_fn(signal): return self.speech_featurizer.tf_extract(signal)\n\n features = tf.map_fn(extract_fn, signals, fn_output_signature=tf.TensorSpec(self.speech_featurizer.shape, dtype=tf.float32))\n logits = self(features, training=False)\n probs = tf.nn.softmax(logits)\n\n def map_fn(prob): return tf.numpy_function(self.perform_greedy, inp=[prob], Tout=tf.string)\n\n return tf.map_fn(map_fn, probs, fn_output_signature=tf.TensorSpec([], dtype=tf.string))\n\n def perform_greedy(self, probs: np.ndarray):\n from ctc_decoders import ctc_greedy_decoder\n decoded = ctc_greedy_decoder(probs, vocabulary=self.text_featurizer.vocab_array)\n return tf.convert_to_tensor(decoded, dtype=tf.string)\n\n def recognize_tflite(self, signal):\n \"\"\"\n Function to convert to tflite using greedy decoding\n Args:\n signal: tf.Tensor with shape [None] indicating a single audio signal\n\n Return:\n transcript: tf.Tensor of Unicode Code Points with shape [None] and dtype tf.int32\n \"\"\"\n features = self.speech_featurizer.tf_extract(signal)\n features = tf.expand_dims(features, axis=0)\n input_length = shape_list(features)[1]\n input_length = input_length // self.base_model.time_reduction_factor\n input_length = tf.expand_dims(input_length, axis=0)\n logits = self(features, training=False)\n probs = tf.nn.softmax(logits)\n decoded = tf.keras.backend.ctc_decode(\n y_pred=probs, input_length=input_length, greedy=True\n )\n decoded = tf.cast(decoded[0][0][0], dtype=tf.int32)\n transcript = self.text_featurizer.indices2upoints(decoded)\n return transcript\n\n # -------------------------------- BEAM SEARCH -------------------------------------\n\n @tf.function\n def recognize_beam(self, signals, lm=False):\n\n def extract_fn(signal): return self.speech_featurizer.tf_extract(signal)\n\n features = tf.map_fn(extract_fn, signals, fn_output_signature=tf.TensorSpec(self.speech_featurizer.shape, dtype=tf.float32))\n logits = self(features, training=False)\n probs = tf.nn.softmax(logits)\n\n def map_fn(prob): return tf.numpy_function(self.perform_beam_search, inp=[prob, lm], 
Tout=tf.string)\n\n return tf.map_fn(map_fn, probs, dtype=tf.string)\n\n def perform_beam_search(self, probs: np.ndarray, lm: bool = False):\n from ctc_decoders import ctc_beam_search_decoder\n decoded = ctc_beam_search_decoder(\n probs_seq=probs,\n vocabulary=self.text_featurizer.vocab_array,\n beam_size=self.text_featurizer.decoder_config[\"beam_width\"],\n ext_scoring_func=self.text_featurizer.scorer if lm else None\n )\n decoded = decoded[0][-1]\n\n return tf.convert_to_tensor(decoded, dtype=tf.string)\n\n def recognize_beam_tflite(self, signal):\n \"\"\"\n Function to convert to tflite using beam search decoding\n Args:\n signal: tf.Tensor with shape [None] indicating a single audio signal\n\n Return:\n transcript: tf.Tensor of Unicode Code Points with shape [None] and dtype tf.int32\n \"\"\"\n features = self.speech_featurizer.tf_extract(signal)\n features = tf.expand_dims(features, axis=0)\n input_length = shape_list(features)[1]\n input_length = input_length // self.base_model.time_reduction_factor\n input_length = tf.expand_dims(input_length, axis=0)\n logits = self(features, training=False)\n probs = tf.nn.softmax(logits)\n decoded = tf.keras.backend.ctc_decode(\n y_pred=probs, input_length=input_length, greedy=False,\n beam_width=self.text_featurizer.decoder_config[\"beam_width\"]\n )\n decoded = tf.cast(decoded[0][0][0], dtype=tf.int32)\n transcript = self.text_featurizer.indices2upoints(decoded)\n return transcript\n\n # -------------------------------- TFLITE -------------------------------------\n\n def make_tflite_function(self, greedy: bool = False):\n if greedy:\n return tf.function(\n self.recognize_tflite,\n input_signature=[\n tf.TensorSpec([None], dtype=tf.float32)\n ]\n )\n return tf.function(\n self.recognize_beam_tflite,\n input_signature=[\n tf.TensorSpec([None], dtype=tf.float32)\n ]\n )\n", "# Copyright 2020 Huy Le Nguyen (@usimarit)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef setup_environment(): # Set memory growth and only log ERRORs\n \"\"\" Setting tensorflow running environment \"\"\"\n import os\n import warnings\n\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\"\n warnings.simplefilter(\"ignore\")\n\n import tensorflow as tf\n\n tf.get_logger().setLevel(\"ERROR\")\n\n tf.config.optimizer.set_experimental_options({\"auto_mixed_precision\": True})\n\n\ndef setup_devices(devices, cpu=False):\n \"\"\"Setting visible devices\n\n Args:\n devices (list): list of visible devices' indices\n \"\"\"\n import tensorflow as tf\n\n if cpu:\n cpus = tf.config.list_physical_devices(\"CPU\")\n tf.config.set_visible_devices(cpus, \"CPU\")\n else:\n gpus = tf.config.list_physical_devices(\"GPU\")\n if gpus:\n visible_gpus = [gpus[i] for i in devices]\n tf.config.set_visible_devices(visible_gpus, \"GPU\")\n print(\"Run on\", len(visible_gpus), \"Physical GPUs\")\n\n\ndef setup_strategy(devices):\n \"\"\"Setting mirrored strategy for training\n\n Args:\n devices (list): list of visible devices' indices\n\n Returns:\n tf.distribute.Strategy: MirroredStrategy for 
training one or multiple gpus\n \"\"\"\n import tensorflow as tf\n\n setup_devices(devices)\n\n return tf.distribute.MirroredStrategy()\n\n\n# def setup_tpu(tpu_address):\n# import tensorflow as tf\n\n# resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + tpu_address)\n# tf.config.experimental_connect_to_cluster(resolver)\n# tf.tpu.experimental.initialize_tpu_system(resolver)\n# print(\"All TPUs: \", tf.config.list_logical_devices('TPU'))\n# return resolver\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.nn.softmax", "tensorflow.keras.Input", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.map_fn", "tensorflow.keras.backend.ctc_decode", "tensorflow.numpy_function", "tensorflow.TensorSpec" ], [ "tensorflow.distribute.MirroredStrategy", "tensorflow.get_logger", "tensorflow.config.list_physical_devices", "tensorflow.config.optimizer.set_experimental_options", "tensorflow.config.set_visible_devices" ] ]
bentrevett/recurrent-attention-model
[ "c07ec97b105c785f6e170a2ba1b3bc7c96e9a920" ]
[ "test_glimpse_sensor.py" ]
[ "import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nimport data_loader\nfrom modules.glimpse_sensor import GlimpseSensor\n\ndef save_images(images, labels, name):\n\n assert len(images) == len(labels) == 5\n\n # get rid of channel dimension of 1, if exists\n images = images.squeeze()\n\n # Create figure with sub-plots.\n fig, axes = plt.subplots(1, 5)\n plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, \n hspace = 0, wspace = 0.1)\n\n\n for i, ax in enumerate(axes.flat):\n # plot the image\n ax.imshow(images[i], cmap=\"Greys_r\")\n\n #xlabel = f'{labels[i]}'\n #ax.set_xlabel(xlabel)\n ax.set_xticks([])\n ax.set_yticks([])\n\n # save image\n fig.savefig(name, bbox_inches = 'tight')\n\ndata = 'MNIST'\nbatch_size = 5\ntrain_data, test_data = data_loader.get_data(data, batch_size)\n\n# get batch \ntest_iter = iter(test_data)\nimages, labels = test_iter.next()\n\n# plot raw images\nos.makedirs('images', exist_ok=True)\nX = images.numpy()\nX = np.transpose(X, [0, 2, 3, 1])\nsave_images(X, labels, 'images/raw')\n\n# create glimpse sensor module\npatch_size = 8\nn_patches = 3\nscale = 2\nglimpse_sensor = GlimpseSensor(patch_size, n_patches, scale)\n\n# location, [0,0] implies centre of the image\nlocation = torch.zeros(batch_size, 2)\n\n# put images through glimpse sensor, making it return list of pytorch tensor images\nx = glimpse_sensor.get_patches(images, location, return_images=True)\n\n# plot all patches for each of the batch of images\ns = 1\nfor i, _x in enumerate(x):\n X = _x.cpu().numpy()\n X = np.transpose(X, [0,2,3,1])\n save_images(X, labels, f'images/patch_{i+1}_scale_{s}')\n s *= scale\n\n# test shape of output\nx = glimpse_sensor.get_patches(images, location)\n\nn_channels = 1\n\nassert x.shape == (batch_size, n_patches*n_channels*patch_size*patch_size)" ]
[ [ "matplotlib.pyplot.subplots_adjust", "torch.zeros", "matplotlib.pyplot.subplots", "numpy.transpose" ] ]
paarthbir77/face-detection
[ "5797f996783abda936bb4756657b4adc86f8d1cc" ]
[ "face_project_test.py" ]
[ "# Import packages\nimport os\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport sys\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n# Import utilites\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'inference_graph'\n\n# Grab path to current working directory\nCWD_PATH = os.getcwd()\n\n# Path to frozen detection graph .pb file, which contains the model that is used\n# for object detection.\nPATH_TO_CKPT = 'face_inference_graph/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')\n\nNUM_CLASSES = 1\n\n## Load the label map.\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n# Load the Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n sess = tf.Session(graph=detection_graph)\n\n\n# Define input and output tensors (i.e. data) for the object detection classifier\n\n# Input tensor is the image\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n# Output tensors are the detection boxes, scores, and classes\n# Each box represents a part of the image where a particular object was detected\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n# Each score represents level of confidence for each of the objects.\n# The score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n\n# Number of objects detected\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n# Initialize webcam feed\nvideo = cv2.VideoCapture(0)\nret = video.set(3,1280)\nret = video.set(4,720)\n\nwhile(True):\n\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\n # i.e. a single-column array, where each item in the column has the pixel RGB value\n ret, frame = video.read()\n frame_expanded = np.expand_dims(frame, axis=0)\n\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n # Draw the results of the detection (aka 'visulaize the results')\n vis_util.visualize_boxes_and_labels_on_image_array(\n frame,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.60)\n\n # All the results have been drawn on the frame, so it's time to display it.\n cv2.imshow('Object detector', frame)\n\n # Press 'q' to quit\n if cv2.waitKey(1) == ord('q'):\n break\n\n# Clean up\nvideo.release()\ncv2.destroyAllWindows()\n" ]
[ [ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.Session", "tensorflow.GraphDef" ] ]
erinzm/NEXT-chemistry
[ "d6ca0a80640937b36f9cafb5ead371e7a8677734" ]
[ "apps/PoolBasedTripletMDS/algs/UncertaintySampling/myAlg.py" ]
[ "\"\"\"\nUncertaintySampling app of the Online Learning Library for Next.Discovery\nauthor: Kevin Jamieson, [email protected]\nlast updated: 1/17/2015\n\"\"\"\nimport numpy\nimport numpy as np\nimport numpy.random\nimport random\nimport next.utils as utils\nfrom apps.PoolBasedTripletMDS.algs.UncertaintySampling import utilsMDS\nimport time\n\nclass MyAlg:\n def initExp(self,butler,n,d,failure_probability):\n X = numpy.random.randn(n,d)\n butler.algorithms.set(key='n',value=n)\n butler.algorithms.set(key='d',value=d)\n butler.algorithms.set(key='delta',value=failure_probability)\n butler.algorithms.set(key='X',value=X.tolist())\n butler.algorithms.set(key='num_reported_answers',value=0)\n return True\n\n\n def getQuery(self,butler):\n n = butler.algorithms.get(key='n')\n d = butler.algorithms.get(key='d')\n # If number of reported answers is small, generate random to avoid overfitting\n num_reported_answers = butler.algorithms.get(key='num_reported_answers')\n if num_reported_answers == None:\n num_reported_answers = 0\n R = int(1+d*numpy.log(n))\n if num_reported_answers < R*n:\n # This generates the same shuffle every time this everytime\n # TODO: but this in utils and call this from other algorithms (they use\n # the same method).\n r = random.Random()\n r.seed(42)\n idxs = np.arange(n).repeat(R).tolist()\n r.shuffle(idxs)\n\n a = idxs[num_reported_answers]\n b = numpy.random.randint(n)\n while b==a:\n b = numpy.random.randint(n)\n c = numpy.random.randint(n)\n while c==a or c==b:\n c = numpy.random.randint(n)\n return [a, b, c]\n # generate an active query\n X = numpy.array(butler.algorithms.get(key='X'))\n # set maximum time allowed to search for a query\n t_max = 0.05\n q,signed_score = utilsMDS.getRandomQuery(X)\n best_q = q\n best_score = abs(signed_score)\n t_start = time.time()\n while time.time()-t_start<t_max:\n q,signed_score = utilsMDS.getRandomQuery(X)\n if abs(signed_score) < best_score:\n best_q = q\n best_score = abs(signed_score)\n index_center = best_q[2]\n index_left = best_q[0]\n index_right = best_q[1]\n return [index_center,index_left,index_right]\n\n\n def processAnswer(self,butler,center_id,left_id,right_id,target_winner):\n if left_id==target_winner:\n q = [left_id,right_id,center_id]\n else:\n q = [right_id,left_id,center_id]\n butler.algorithms.append(key='S',value=q)\n n = butler.algorithms.get(key='n')\n num_reported_answers = butler.algorithms.increment(key='num_reported_answers')\n if num_reported_answers % int(n) == 0:\n butler.job('full_embedding_update', {}, time_limit=30)\n else:\n butler.job('incremental_embedding_update', {},time_limit=5)\n return True\n\n\n def getModel(self,butler):\n return butler.algorithms.get(key=['X','num_reported_answers'])\n\n\n def incremental_embedding_update(self,butler,args):\n verbose = False\n S = butler.algorithms.get(key='S')\n X = numpy.array(butler.algorithms.get(key='X'))\n # set maximum time allowed to update embedding\n t_max = 1.0\n epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation\n # take a single gradient step\n t_start = time.time()\n X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1,verbose=verbose)\n k = 1\n while (time.time()-t_start<0.5*t_max) and (acc > epsilon):\n # take a single gradient step\n X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k,verbose=verbose)\n k += 1\n butler.algorithms.set(key='X',value=X.tolist())\n\n def full_embedding_update(self,butler,args):\n verbose = False\n n = 
butler.algorithms.get(key='n')\n d = butler.algorithms.get(key='d')\n S = butler.algorithms.get(key='S')\n X_old = numpy.array(butler.algorithms.get(key='X'))\n t_max = 5.0\n epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation\n emp_loss_old,hinge_loss_old = utilsMDS.getLoss(X_old,S)\n X,tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=16,epsilon=0,verbose=verbose)\n t_start = time.time()\n X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1,verbose=verbose)\n k = 1\n while (time.time()-t_start<0.5*t_max) and (acc > epsilon):\n X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k,verbose=verbose)\n k += 1\n emp_loss_new,hinge_loss_new = utilsMDS.getLoss(X,S)\n if emp_loss_old < emp_loss_new:\n X = X_old\n butler.algorithms.set(key='X',value=X.tolist())\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.arange", "numpy.log", "numpy.random.randn", "numpy.random.randint" ] ]
KimDaeUng/few-shot-learning
[ "7068e088a8ae558fe050e8ee46c0c18953a147fd" ]
[ "data.py" ]
[ "import configparser\r\nimport os\r\nimport re\r\nimport string\r\nimport pickle\r\nimport copy\r\nimport random\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nfrom fastNLP import Vocabulary\r\nfrom dataset import Dataset\r\nfrom dataloader import TrainDataLoader\r\nfrom utils import padding, batch_padding\r\n\r\n\r\ndef _parse_list(data_path, list_name):\r\n domain = set()\r\n with open(os.path.join(data_path, list_name), 'r', encoding='utf-8') as f:\r\n for line in f:\r\n domain.add(line.strip('\\n'))\r\n return domain\r\n\r\n\r\ndef get_domains(data_path, filtered_name, target_name):\r\n all_domains = _parse_list(data_path, filtered_name)\r\n test_domains = _parse_list(data_path, target_name)\r\n train_domains = all_domains - test_domains\r\n print('train domains', len(train_domains), 'test_domains', len(test_domains))\r\n return sorted(list(train_domains)), sorted(list(test_domains))\r\n\r\n\r\ndef _parse_data(data_path, filename):\r\n neg = {\r\n 'filename': filename,\r\n 'data': [],\r\n 'target': []\r\n }\r\n pos = {\r\n 'filename': filename,\r\n 'data': [],\r\n 'target': []\r\n }\r\n with open(os.path.join(data_path, filename), 'r', encoding='utf-8') as f:\r\n for line in f:\r\n line = line.strip('\\n')\r\n if line[-2:] == '-1':\r\n neg['data'].append(line[:-2])\r\n neg['target'].append(0)\r\n else:\r\n pos['data'].append(line[:-1])\r\n pos['target'].append(1)\r\n # check\r\n print(filename, 'neg', len(neg['data']), 'pos', len(pos['data']))\r\n return neg, pos\r\n\r\n\r\ndef _process_data(data_dict):\r\n for i in range(len(data_dict['data'])):\r\n text = data_dict['data'][i]\r\n # ignore string.punctuation\r\n text = re.sub('[%s]' % re.escape(string.punctuation), ' ', text)\r\n # string.whitespace -> space\r\n text = re.sub('[%s]' % re.escape(string.whitespace), ' ', text)\r\n # lower case\r\n text = text.lower()\r\n # split by whitespace\r\n text = text.split()\r\n # replace\r\n data_dict['data'][i] = text\r\n return data_dict\r\n\r\n\r\ndef _get_data(data_path, domains, usage):\r\n # usage in ['train', 'dev', 'test']\r\n data = {}\r\n for domain in domains:\r\n for t in ['t2', 't4', 't5']:\r\n filename = '.'.join([domain, t, usage])\r\n neg, pos = _parse_data(data_path, filename)\r\n neg = _process_data(neg)\r\n pos = _process_data(pos)\r\n data[filename] = {'neg': neg, 'pos': pos}\r\n return data\r\n\r\n\r\ndef get_train_data(data_path, domains):\r\n train_data = _get_data(data_path, domains, 'train')\r\n print('train data', len(train_data))\r\n return train_data\r\n\r\n\r\ndef _combine_data(support_data, data):\r\n # support -> dev, test\r\n for key in data:\r\n key_split = key.split('.')[0:-1] + ['train']\r\n support_key = '.'.join(key_split)\r\n for value in data[key]:\r\n data[key][value]['support_data'] = copy.deepcopy(support_data[support_key][value]['data'])\r\n data[key][value]['support_target'] = copy.deepcopy(support_data[support_key][value]['target'])\r\n return data\r\n\r\n\r\ndef get_test_data(data_path, domains):\r\n # get dev, test data\r\n support_data = _get_data(data_path, domains, 'train')\r\n dev_data = _get_data(data_path, domains, 'dev')\r\n test_data = _get_data(data_path, domains, 'test')\r\n\r\n # support -> dev, test\r\n dev_data = _combine_data(support_data, dev_data)\r\n test_data = _combine_data(support_data, test_data)\r\n print('dev data', len(dev_data), 'test data', len(test_data))\r\n return dev_data, test_data\r\n\r\n\r\ndef get_vocabulary(data, min_freq):\r\n # train data -> vocabulary\r\n vocabulary = 
Vocabulary(min_freq=min_freq, padding='<pad>', unknown='<unk>')\r\n for filename in data:\r\n for value in data[filename]:\r\n for word_list in data[filename][value]['data']:\r\n vocabulary.add_word_lst(word_list)\r\n vocabulary.build_vocab()\r\n print('vocab size', len(vocabulary), 'pad', vocabulary.padding_idx, 'unk', vocabulary.unknown_idx)\r\n return vocabulary\r\n\r\n\r\ndef _idx_text(text_list, vocabulary):\r\n for i in range(len(text_list)):\r\n for j in range(len(text_list[i])):\r\n text_list[i][j] = vocabulary.to_index(text_list[i][j])\r\n return text_list\r\n\r\n\r\ndef idx_all_data(data, vocabulary):\r\n for filename in data:\r\n for value in data[filename]:\r\n for key in data[filename][value]:\r\n if key in ['data', 'support_data']:\r\n data[filename][value][key] = _idx_text(data[filename][value][key], vocabulary)\r\n return data\r\n\r\n\r\ndef get_train_loader(train_data, support, query, pad_idx):\r\n batch_size = support + query\r\n train_loaders = {}\r\n for filename in train_data:\r\n neg_dl = DataLoader(Dataset(train_data[filename]['neg'], pad_idx), batch_size=batch_size, shuffle=True, drop_last=False, **kwargs)\r\n pos_dl = DataLoader(Dataset(train_data[filename]['pos'], pad_idx), batch_size=batch_size, shuffle=True, drop_last=False, **kwargs)\r\n if min(len(neg_dl), len(pos_dl)) > 0:\r\n train_loaders[filename] = {\r\n 'neg': neg_dl,\r\n 'pos': pos_dl\r\n }\r\n print('train loaders', len(train_loaders))\r\n return TrainDataLoader(train_loaders, support=support, query=query, pad_idx=pad_idx)\r\n\r\n\r\ndef get_test_loader(full_data, support, query, pad_idx):\r\n loader = []\r\n for filename in full_data:\r\n # support\r\n support_data = full_data[filename]['neg']['support_data'][0:support] + full_data[filename]['pos']['support_data'][0:support]\r\n support_data = batch_padding(support_data, pad_idx)\r\n support_target = full_data[filename]['neg']['support_target'][0:support] + full_data[filename]['pos']['support_target'][0:support]\r\n support_target = torch.tensor(support_target)\r\n # query\r\n neg_dl = DataLoader(Dataset(full_data[filename]['neg'], pad_idx), batch_size=query * 2, shuffle=False, drop_last=False, **kwargs)\r\n pos_dl = DataLoader(Dataset(full_data[filename]['pos'], pad_idx), batch_size=query * 2, shuffle=False, drop_last=False, **kwargs)\r\n # combine\r\n for dl in [neg_dl, pos_dl]:\r\n for batch_data, batch_target in dl:\r\n support_data_cp, support_target_cp = copy.deepcopy(support_data), copy.deepcopy(support_target)\r\n support_data_cp, batch_data = padding(support_data_cp, batch_data, pad_idx)\r\n data = torch.cat([support_data_cp, batch_data], dim=0)\r\n target = torch.cat([support_target_cp, batch_target], dim=0)\r\n loader.append((data, target))\r\n print('test loader length', len(loader))\r\n return loader\r\n\r\n\r\ndef main():\r\n train_domains, test_domains = get_domains(data_path, config['data']['filtered_list'], config['data']['target_list'])\r\n\r\n train_data = get_train_data(data_path, train_domains)\r\n dev_data, test_data = get_test_data(data_path, test_domains)\r\n # print(dev_data['books.t2.dev']['neg']['support_data'])\r\n # print(dev_data['books.t2.dev']['neg']['support_target'])\r\n\r\n vocabulary = get_vocabulary(train_data, min_freq=int(config['data']['min_freq']))\r\n pad_idx = vocabulary.padding_idx\r\n pickle.dump(vocabulary, open(os.path.join(config['data']['path'], config['data']['vocabulary']), 'wb'))\r\n\r\n train_data = idx_all_data(train_data, vocabulary)\r\n dev_data = idx_all_data(dev_data, vocabulary)\r\n test_data 
= idx_all_data(test_data, vocabulary)\r\n # print(dev_data['books.t2.dev']['neg']['support_data'])\r\n # print(dev_data['books.t2.dev']['neg']['support_target'])\r\n\r\n support = int(config['model']['support'])\r\n query = int(config['model']['query'])\r\n train_loader = get_train_loader(train_data, support, query, pad_idx)\r\n dev_loader = get_test_loader(dev_data, support, query, pad_idx)\r\n test_loader = get_test_loader(test_data, support, query, pad_idx)\r\n \r\n pickle.dump(train_loader, open(os.path.join(config['data']['path'], config['data']['train_loader']), 'wb'))\r\n pickle.dump(dev_loader, open(os.path.join(config['data']['path'], config['data']['dev_loader']), 'wb'))\r\n pickle.dump(test_loader, open(os.path.join(config['data']['path'], config['data']['test_loader']), 'wb'))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # config\r\n config = configparser.ConfigParser()\r\n config.read(\"config.ini\")\r\n\r\n # seed\r\n seed = int(config['data']['seed'])\r\n random.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed_all(seed)\r\n\r\n kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}\r\n data_path = config['data']['path']\r\n main()\r\n" ]
[ [ "numpy.random.seed", "torch.cat", "torch.manual_seed", "torch.tensor", "torch.cuda.is_available", "torch.cuda.manual_seed_all" ] ]
jaeglee0416/pydatalab
[ "ef1842571127708d494a05ee247b17bc28e33b63" ]
[ "solutionbox/structured_data/mltoolbox/_structured_data/prediction/predict.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Runs prediction on a trained model.\"\"\"\n\n\nimport argparse\nimport datetime\nimport os\nimport sys\n\nimport apache_beam as beam\n\n\ndef parse_arguments(argv):\n \"\"\"Parse command line arguments.\n\n Args:\n argv: includes the script's name.\n\n Returns:\n argparse object\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Runs Prediction inside a beam or Dataflow job.')\n # cloud options\n parser.add_argument('--project-id',\n help='The project to which the job will be submitted.')\n parser.add_argument('--cloud',\n action='store_true',\n help='Run preprocessing on the cloud.')\n parser.add_argument('--job-name',\n default=('mltoolbox-batch-prediction-' +\n datetime.datetime.now().strftime('%Y%m%d%H%M%S')),\n help='Dataflow job name. Must be unique over all jobs.')\n parser.add_argument('--extra-package',\n default=[],\n action='append',\n help=('If using --cloud, also installs these packages on '\n 'each dataflow worker'))\n\n # I/O args\n parser.add_argument('--predict-data',\n required=True,\n help='Data to run prediction on')\n parser.add_argument('--trained-model-dir',\n required=True,\n help='Usually train_output_path/model.')\n parser.add_argument('--output-dir',\n required=True,\n help=('Location to save output.'))\n\n # Other args\n parser.add_argument('--batch-size',\n required=False,\n default=1000,\n type=int,\n help=('Batch size. Larger values consumes more memrory '\n 'but takes less time to finish.'))\n parser.add_argument('--shard-files',\n dest='shard_files',\n action='store_true',\n help='Shard files')\n parser.add_argument('--no-shard-files',\n dest='shard_files',\n action='store_false',\n help='Don\\'t shard files')\n parser.set_defaults(shard_files=True)\n\n parser.add_argument('--output-format',\n choices=['csv', 'json'],\n default='csv',\n help=\"\"\"\n The output results.\n raw_json: produces a newline file where each line is json. 
No\n post processing is performed and the output matches what the trained\n model produces.\n csv: produces a csv file without a header row and a header csv file.\n For classification problems, the vector of probabilities for each\n target class is split into individual csv columns.\"\"\")\n\n args, _ = parser.parse_known_args(args=argv[1:])\n\n if args.cloud:\n if not args.project_id:\n raise ValueError('--project-id needed with --cloud')\n if not args.trained_model_dir.startswith('gs://'):\n raise ValueError('--trained-model-dir needs to be a GCS path.')\n if not args.output_dir.startswith('gs://'):\n raise ValueError('--output-dir needs to be a GCS path.')\n if not args.predict_data.startswith('gs://'):\n raise ValueError('--predict-data needs to be a GCS path.')\n\n return args\n\n\nclass EmitAsBatchDoFn(beam.DoFn):\n \"\"\"A DoFn that buffers the records and emits them batch by batch.\"\"\"\n\n def __init__(self, batch_size):\n \"\"\"Constructor of EmitAsBatchDoFn beam.DoFn class.\n\n Args:\n batch_size: the max size we want to buffer the records before emitting.\n \"\"\"\n self._batch_size = batch_size\n self._cached = []\n\n def process(self, element):\n self._cached.append(element)\n if len(self._cached) >= self._batch_size:\n emit = self._cached\n self._cached = []\n yield emit\n\n def finish_bundle(self, element=None):\n if len(self._cached) > 0: # pylint: disable=g-explicit-length-test\n yield self._cached\n\n\nclass RunGraphDoFn(beam.DoFn):\n \"\"\"A DoFn for running the TF graph.\"\"\"\n\n def __init__(self, trained_model_dir):\n self._trained_model_dir = trained_model_dir\n self._session = None\n\n def start_bundle(self, element=None):\n from tensorflow.python.saved_model import tag_constants\n from tensorflow.contrib.session_bundle import bundle_shim\n\n self._session, meta_graph = bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(\n self._trained_model_dir, tags=[tag_constants.SERVING])\n signature = meta_graph.signature_def['serving_default']\n\n # get the mappings between aliases and tensor names\n # for both inputs and outputs\n self._input_alias_map = {friendly_name: tensor_info_proto.name\n for (friendly_name, tensor_info_proto) in signature.inputs.items()}\n self._output_alias_map = {friendly_name: tensor_info_proto.name\n for (friendly_name, tensor_info_proto) in signature.outputs.items()}\n self._aliases, self._tensor_names = zip(*self._output_alias_map.items())\n\n def finish_bundle(self, element=None):\n self._session.close()\n\n def process(self, element):\n \"\"\"Run batch prediction on a TF graph.\n\n Args:\n element: list of strings, representing one batch input to the TF graph.\n \"\"\"\n import collections\n import apache_beam as beam\n\n num_in_batch = 0\n try:\n assert self._session is not None\n\n feed_dict = collections.defaultdict(list)\n for line in element:\n\n # Remove trailing newline.\n if line.endswith('\\n'):\n line = line[:-1]\n\n feed_dict[self._input_alias_map.values()[0]].append(line)\n num_in_batch += 1\n\n # batch_result is list of numpy arrays with batch_size many rows.\n batch_result = self._session.run(fetches=self._tensor_names,\n feed_dict=feed_dict)\n\n # ex batch_result for batch_size > 1:\n # (array([value1, value2, ..., value_batch_size]),\n # array([[a1, b1, c1]], ..., [a_batch_size, b_batch_size, c_batch_size]]),\n # ...)\n # ex batch_result for batch_size == 1:\n # (value,\n # array([a1, b1, c1]),\n # ...)\n\n # Convert the results into a dict and unbatch the results.\n if num_in_batch > 1:\n for result in 
zip(*batch_result):\n predictions = {}\n for name, value in zip(self._aliases, result):\n predictions[name] = (value.tolist() if getattr(value, 'tolist', None) else value)\n yield predictions\n else:\n predictions = {}\n for i in range(len(self._aliases)):\n value = batch_result[i]\n value = (value.tolist() if getattr(value, 'tolist', None)\n else value)\n predictions[self._aliases[i]] = value\n yield predictions\n\n except Exception as e: # pylint: disable=broad-except\n yield beam.pvalue.SideOutputValue('errors',\n (str(e), element))\n\n\nclass RawJsonCoder(beam.coders.Coder):\n \"\"\"Coder for json newline files.\"\"\"\n\n def encode(self, obj):\n \"\"\"Encodes a python object into a JSON string.\n\n Args:\n obj: python object.\n\n Returns:\n JSON string.\n \"\"\"\n import json\n return json.dumps(obj, separators=(',', ': '))\n\n\nclass CSVCoder(beam.coders.Coder):\n \"\"\"Coder for CSV files containing the output of prediction.\"\"\"\n\n def __init__(self, header):\n \"\"\"Sets the headers in the csv file.\n\n Args:\n header: list of strings that correspond to keys in the predictions dict.\n \"\"\"\n self._header = header\n\n def make_header_string(self):\n return ','.join(self._header)\n\n def encode(self, tf_graph_predictions):\n \"\"\"Encodes the graph json prediction into csv.\n\n Args:\n tf_graph_predictions: python dict.\n\n Returns:\n csv string.\n \"\"\"\n row = []\n for col in self._header:\n row.append(str(tf_graph_predictions[col]))\n\n return ','.join(row)\n\n\nclass FormatAndSave(beam.PTransform):\n\n def __init__(self, args):\n self._shard_name_template = None if args.shard_files else ''\n self._output_format = args.output_format\n self._output_dir = args.output_dir\n\n # Get the BQ schema if csv.\n if self._output_format == 'csv':\n from tensorflow.python.saved_model import tag_constants\n from tensorflow.contrib.session_bundle import bundle_shim\n from tensorflow.core.framework import types_pb2\n\n session, meta_graph = bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(\n args.trained_model_dir, tags=[tag_constants.SERVING])\n signature = meta_graph.signature_def['serving_default']\n\n self._schema = []\n for friendly_name in sorted(signature.outputs):\n tensor_info_proto = signature.outputs[friendly_name]\n\n # TODO(brandondutra): Could dtype be DT_INVALID?\n # Consider getting the dtype from the graph via\n # session.graph.get_tensor_by_name(tensor_info_proto.name).dtype)\n dtype = tensor_info_proto.dtype\n if dtype == types_pb2.DT_FLOAT or dtype == types_pb2.DT_DOUBLE:\n bq_type = 'FLOAT'\n elif dtype == types_pb2.DT_INT32 or dtype == types_pb2.DT_INT64:\n bq_type = 'INTEGER'\n else:\n bq_type = 'STRING'\n\n self._schema.append({'mode': 'NULLABLE',\n 'name': friendly_name,\n 'type': bq_type})\n session.close()\n\n def apply(self, datasets):\n return self.expand(datasets)\n\n def expand(self, datasets):\n import json\n\n tf_graph_predictions, errors = datasets\n\n if self._output_format == 'json':\n (tf_graph_predictions |\n 'Write Raw JSON' >>\n beam.io.textio.WriteToText(os.path.join(self._output_dir, 'predictions'),\n file_name_suffix='.json',\n coder=RawJsonCoder(),\n shard_name_template=self._shard_name_template))\n elif self._output_format == 'csv':\n # make a csv header file\n header = [col['name'] for col in self._schema]\n csv_coder = CSVCoder(header)\n (tf_graph_predictions.pipeline |\n 'Make CSV Header' >>\n beam.Create([json.dumps(self._schema, indent=2)]) |\n 'Write CSV Schema File' >>\n 
beam.io.textio.WriteToText(os.path.join(self._output_dir, 'csv_schema'),\n file_name_suffix='.json',\n shard_name_template=''))\n\n # Write the csv predictions\n (tf_graph_predictions |\n 'Write CSV' >>\n beam.io.textio.WriteToText(os.path.join(self._output_dir, 'predictions'),\n file_name_suffix='.csv',\n coder=csv_coder,\n shard_name_template=self._shard_name_template))\n else:\n raise ValueError('FormatAndSave: unknown format %s' % self._output_format)\n\n # Write the errors to a text file.\n (errors |\n 'Write Errors' >>\n beam.io.textio.WriteToText(os.path.join(self._output_dir, 'errors'),\n file_name_suffix='.txt',\n shard_name_template=self._shard_name_template))\n\n\ndef make_prediction_pipeline(pipeline, args):\n \"\"\"Builds the prediction pipeline.\n\n Reads the csv files, prepends a ',' if the target column is missing, runs\n prediction, and then prints the formatted results to a file.\n\n Args:\n pipeline: the pipeline\n args: command line args\n \"\"\"\n\n # DF bug: DF does not work with unicode strings\n predicted_values, errors = (\n pipeline |\n 'Read CSV Files' >>\n beam.io.ReadFromText(str(args.predict_data),\n strip_trailing_newlines=True) |\n 'Batch Input' >>\n beam.ParDo(EmitAsBatchDoFn(args.batch_size)) |\n 'Run TF Graph on Batches' >>\n beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main'))\n\n ((predicted_values, errors) |\n 'Format and Save' >>\n FormatAndSave(args))\n\n\ndef main(argv=None):\n args = parse_arguments(sys.argv if argv is None else argv)\n\n if args.cloud:\n options = {\n 'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),\n 'temp_location': os.path.join(args.output_dir, 'tmp', 'staging'),\n 'job_name': args.job_name,\n 'project': args.project_id,\n 'no_save_main_session': True,\n 'extra_packages': args.extra_package,\n 'teardown_policy': 'TEARDOWN_ALWAYS',\n }\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n # Or use BlockingDataflowPipelineRunner\n p = beam.Pipeline('DataflowRunner', options=opts)\n else:\n p = beam.Pipeline('DirectRunner')\n\n make_prediction_pipeline(p, args)\n\n if args.cloud:\n print(('Dataflow Job submitted, see Job %s at '\n 'https://console.developers.google.com/dataflow?project=%s') %\n (options['job_name'], args.project_id))\n sys.stdout.flush()\n\n runner_results = p.run()\n return runner_results\n\n\nif __name__ == '__main__':\n runner_results = main()\n runner_results.wait_until_finish()\n" ]
[ [ "tensorflow.contrib.session_bundle.bundle_shim.load_session_bundle_or_saved_model_bundle_from_path" ] ]
a625687551/jieba_faster
[ "6376b4f0602d9d15367111e4d31e1087c91d31fc" ]
[ "test/extract_topic.py" ]
[ "import sys\n\nsys.path.append(\"../\")\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn import decomposition\n\nimport jieba\nimport time\nimport glob\nimport sys\nimport os\nimport random\n\nif len(sys.argv) < 2:\n print(\"usage: extract_topic.py directory [n_topic] [n_top_words]\")\n sys.exit(0)\n\nn_topic = 10\nn_top_words = 25\n\nif len(sys.argv) > 2:\n n_topic = int(sys.argv[2])\n\nif len(sys.argv) > 3:\n n_top_words = int(sys.argv[3])\n\ncount_vect = CountVectorizer()\ndocs = []\n\npattern = os.path.join(sys.argv[1], \"*.txt\")\nprint(\"read \" + pattern)\n\nfor f_name in glob.glob(pattern):\n with open(f_name) as f:\n print(\"read file:\", f_name)\n for line in f: # one line as a document\n words = \" \".join(jieba.cut(line))\n docs.append(words)\n\nrandom.shuffle(docs)\n\nprint(\"read done.\")\n\nprint(\"transform\")\ncounts = count_vect.fit_transform(docs)\ntfidf = TfidfTransformer().fit_transform(counts)\nprint(tfidf.shape)\n\nt0 = time.time()\nprint(\"training...\")\n\nnmf = decomposition.NMF(n_components=n_topic).fit(tfidf)\nprint(\"done in %0.3fs.\" % (time.time() - t0))\n\n# Inverse the vectorizer vocabulary to be able\nfeature_names = count_vect.get_feature_names()\n\nfor topic_idx, topic in enumerate(nmf.components_):\n print(\"Topic #%d:\" % topic_idx)\n print(\" \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]))\n print(\"\")\n" ]
[ [ "sklearn.decomposition.NMF", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.feature_extraction.text.TfidfTransformer" ] ]
szha/onnxmltools
[ "b04d05bda625cbc006955ce0a220277739a95825", "b04d05bda625cbc006955ce0a220277739a95825" ]
[ "tests/sparkml/test_decision_tree_regressor.py", "onnxmltools/convert/xgboost/_parse.py" ]
[ "import sys\r\nimport inspect\r\nimport unittest\r\nfrom distutils.version import StrictVersion\r\n\r\nimport onnx\r\nimport pandas\r\nimport numpy\r\nfrom pyspark.ml.linalg import Vectors, SparseVector, VectorUDT\r\nfrom pyspark.ml.regression import DecisionTreeRegressor\r\nfrom pyspark.ml import Pipeline\r\n\r\nfrom onnxmltools import convert_sparkml\r\nfrom onnxmltools.convert.common.data_types import FloatTensorType\r\nfrom tests.sparkml.sparkml_test_utils import save_data_models, run_onnx_model, compare_results\r\nfrom tests.sparkml import SparkMlTestCase\r\nfrom pyspark.ml.feature import VectorIndexer\r\n\r\n\r\nclass TestSparkmDecisionTreeRegressor(SparkMlTestCase):\r\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\r\n @unittest.skipIf(StrictVersion(onnx.__version__) <= StrictVersion('1.3'), 'Need Greater Opset 9')\r\n def test_decision_tree_regressor_pipeline(self):\r\n import os\r\n this_script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\r\n input_path = os.path.join(this_script_dir, \"data\", \"sample_libsvm_data.txt\")\r\n original_data = self.spark.read.format(\"libsvm\").load(input_path)\r\n\r\n feature_count = 5\r\n self.spark.udf.register(\"truncateFeatures\",\r\n lambda x: SparseVector(feature_count, range(0,feature_count), x.toArray()[125:130]),\r\n VectorUDT())\r\n data = original_data.selectExpr(\"label\", \"truncateFeatures(features) as features\")\r\n\r\n featureIndexer = \\\r\n VectorIndexer(inputCol=\"features\", outputCol=\"indexedFeatures\", maxCategories=4, handleInvalid='error')\r\n (trainingData, testData) = data.randomSplit([0.7, 0.3])\r\n dt = DecisionTreeRegressor(featuresCol=\"indexedFeatures\")\r\n pipeline = Pipeline(stages=[featureIndexer, dt])\r\n model = pipeline.fit(trainingData)\r\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Regressor Pipeline', [\r\n ('features', FloatTensorType([1, feature_count]))\r\n ], spark_session=self.spark)\r\n self.assertTrue(model_onnx is not None)\r\n # run the model\r\n predicted = model.transform(testData)\r\n data_np = testData.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)\r\n expected = [\r\n predicted.toPandas().prediction.values.astype(numpy.float32)\r\n ]\r\n paths = save_data_models(data_np, expected, model, model_onnx,\r\n basename=\"SparkmlDecisionTreeRegressorPipeline\")\r\n onnx_model_path = paths[3]\r\n output, output_shapes = run_onnx_model(['prediction'], data_np, onnx_model_path)\r\n compare_results(expected, output, decimal=5)\r\n\r\n @unittest.skipIf(sys.version_info[0] == 2, reason=\"Sparkml not tested on python 2\")\r\n def test_decision_tree_regressor(self):\r\n features = [[0, 1], [1, 1], [2, 0]]\r\n features = numpy.array(features, dtype=numpy.float32)\r\n labels = [100, -10, 50]\r\n dd = [(labels[i], Vectors.dense(features[i])) for i in range(len(labels))]\r\n data = self.spark.createDataFrame(self.spark.sparkContext.parallelize(dd), schema=[\"label\", \"features\"])\r\n dt = DecisionTreeRegressor(labelCol=\"label\", featuresCol=\"features\")\r\n model = dt.fit(data)\r\n feature_count = data.select('features').first()[0].size\r\n model_onnx = convert_sparkml(model, 'Sparkml Decision Tree Regressor', [\r\n ('features', FloatTensorType([1, feature_count]))\r\n ], spark_session=self.spark)\r\n self.assertTrue(model_onnx is not None)\r\n # run the model\r\n data_np = data.toPandas().features.apply(lambda x: 
pandas.Series(x.toArray())).values.astype(numpy.float32)\r\n predicted = model.transform(data)\r\n expected = [\r\n predicted.toPandas().prediction.values.astype(numpy.float32)\r\n ]\r\n paths = save_data_models(data_np, expected, model, model_onnx,\r\n basename=\"SparkmlDecisionTreeRegressor\")\r\n onnx_model_path = paths[3]\r\n output, output_shapes = run_onnx_model(['prediction'], data_np, onnx_model_path)\r\n compare_results(expected, output, decimal=5)\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n", "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport json\nimport numpy as np\nfrom xgboost import XGBRegressor, XGBClassifier\nfrom onnxconverter_common.data_types import FloatTensorType\nfrom ..common._container import XGBoostModelContainer\nfrom ..common._topology import Topology\n\n\nxgboost_classifier_list = [XGBClassifier]\n\n# Associate types with our operator names.\nxgboost_operator_name_map = {XGBClassifier: 'XGBClassifier',\n XGBRegressor: 'XGBRegressor'}\n\n\ndef _append_covers(node):\n res = []\n if 'cover' in node:\n res.append(node['cover'])\n if 'children' in node:\n for ch in node['children']:\n res.extend(_append_covers(ch))\n return res\n\n\ndef _get_attributes(booster):\n atts = booster.attributes()\n ntrees = booster.best_ntree_limit\n dp = booster.get_dump(dump_format='json', with_stats=True) \n res = [json.loads(d) for d in dp]\n trees = len(res)\n kwargs = atts.copy()\n kwargs['feature_names'] = booster.feature_names\n kwargs['n_estimators'] = ntrees\n \n # covers\n covs = []\n for tr in res:\n covs.extend(_append_covers(tr))\n\n if all(map(lambda x: int(x) == x, set(covs))):\n # regression\n kwargs['num_class'] = 0\n if trees > ntrees > 0:\n kwargs['num_target'] = trees // ntrees\n kwargs[\"objective\"] = \"reg:squarederror\"\n else:\n kwargs['num_target'] = 1\n kwargs[\"objective\"] = \"reg:squarederror\"\n else:\n # classification\n kwargs['num_target'] = 0\n if trees > ntrees > 0:\n kwargs['num_class'] = trees // ntrees\n kwargs[\"objective\"] = \"multi:softprob\"\n else:\n kwargs['num_class'] = 1\n kwargs[\"objective\"] = \"binary:logistic\"\n\n if 'base_score' not in kwargs:\n kwargs['base_score'] = 0.5\n return kwargs\n\n\nclass WrappedBooster:\n\n def __init__(self, booster):\n self.booster_ = booster\n self.kwargs = _get_attributes(booster)\n\n if self.kwargs['num_class'] > 0:\n self.classes_ = self._generate_classes(self.kwargs)\n self.operator_name = 'XGBClassifier'\n else:\n self.operator_name = 'XGBRegressor'\n\n def get_xgb_params(self):\n return self.kwargs\n\n def get_booster(self):\n return self.booster_\n\n def _generate_classes(self, model_dict):\n if model_dict['num_class'] == 1:\n return np.asarray([0, 1])\n return np.arange(model_dict['num_class']) \n\n\ndef _get_xgboost_operator_name(model):\n '''\n Get operator name of the input argument\n\n :param model_type: A xgboost object.\n :return: A string which stands for the type of the input model in our conversion framework\n '''\n if isinstance(model, WrappedBooster):\n return model.operator_name\n if type(model) not in xgboost_operator_name_map:\n raise ValueError(\"No proper operator name found for '%s'\" % type(model))\n return xgboost_operator_name_map[type(model)]\n\n\ndef _parse_xgboost_simple_model(scope, 
model, inputs):\n '''\n This function handles all non-pipeline models.\n\n :param scope: Scope object\n :param model: A xgboost object\n :param inputs: A list of variables\n :return: A list of output variables which will be passed to the next stage\n '''\n this_operator = scope.declare_local_operator(_get_xgboost_operator_name(model), model)\n this_operator.inputs = inputs\n\n if (type(model) in xgboost_classifier_list or\n getattr(model, 'operator_name', None) == 'XGBClassifier'):\n # For classifiers, we may have two outputs, one for label and the other one for probabilities of all classes.\n # Notice that their types here are not necessarily correct and they will be fixed in the shape inference phase\n label_variable = scope.declare_local_variable('label', FloatTensorType())\n probability_map_variable = scope.declare_local_variable('probabilities', FloatTensorType())\n this_operator.outputs.append(label_variable)\n this_operator.outputs.append(probability_map_variable)\n else:\n # We assume that all scikit-learn operators can only produce a single float tensor.\n variable = scope.declare_local_variable('variable', FloatTensorType())\n this_operator.outputs.append(variable)\n return this_operator.outputs\n\n\ndef _parse_xgboost(scope, model, inputs):\n '''\n This is a delegate function. It does nothing but invoke the correct parsing function according to the input\n model's type.\n :param scope: Scope object\n :param model: A xgboost object\n :param inputs: A list of variables\n :return: The output variables produced by the input model\n '''\n return _parse_xgboost_simple_model(scope, model, inputs)\n\n\ndef parse_xgboost(model, initial_types=None, target_opset=None,\n custom_conversion_functions=None, custom_shape_calculators=None):\n\n raw_model_container = XGBoostModelContainer(model)\n topology = Topology(raw_model_container, default_batch_size='None',\n initial_types=initial_types, target_opset=target_opset,\n custom_conversion_functions=custom_conversion_functions,\n custom_shape_calculators=custom_shape_calculators)\n scope = topology.declare_scope('__root__')\n\n inputs = []\n for var_name, initial_type in initial_types:\n inputs.append(scope.declare_local_variable(var_name, initial_type))\n\n for variable in inputs:\n raw_model_container.add_input(variable)\n\n outputs = _parse_xgboost(scope, model, inputs)\n\n for variable in outputs:\n raw_model_container.add_output(variable)\n\n return topology\n" ]
[ [ "numpy.array" ], [ "numpy.asarray", "numpy.arange" ] ]
sidmohite/kNe-inference
[ "5642d7f4dc954cb36d76c3729d1f719ff04db0d9" ]
[ "scripts/bayesian_LC_gpapprox.py" ]
[ "import numpy as np\nimport astropy as ap\nfrom astropy import units as u\nfrom astropy import constants as c\nimport matplotlib.pyplot as plt\nimport bayesian_LC as blc\nfrom approxposterior import approx, gpUtils, likelihood as lh, utility as ut\nimport emcee\nimport corner\n\ndef prior_sample(n):\n return np.array([[np.random.uniform(low=-17,high=-12),np.random.uniform(low=0,high=1)] for i in range(n)])\n\nkne_inf = blc.Kilonova_Inference()\n\nm_low = 15\nm_high = 27\nmlims_f_j = np.array([[22,26,26,26],[21,27,27,27]])\nf_bar_sum = kne_inf.compute_f_bar_sum(mlims_f_j)\nt0 = 1*u.day\nT_f_j = [(np.linspace(t0.value,t0.value+4,4))*u.day,(np.linspace(t0.value,t0.value+4,4))*u.day]\nP_f = np.array([0.999,0.])\np_d_f = np.array([kne_inf.create_distance_dist(156,41),kne_inf.create_distance_dist(156,41)])\nP_A= 0.2\nP_T = 1.-P_A\n\nm_low_arr = np.ones_like(mlims_f_j)*m_low\nm_high_arr = np.ones_like(mlims_f_j)*m_high\ntotal_obs = sum(len(m) for m in mlims_f_j)\nplims_f_bar = 1/(m_high-m_low)**(total_obs)*(1-sum(P_f))\nplims_T = 1/(m_high-m_low)**(total_obs)\n\ntheta = np.array([[np.random.uniform(low=-17,high=-12),np.random.uniform(low=0,high=1)] for i in range(100)])\ny = np.array([kne_inf.ln_prob(th,mlims_f_j,T_f_j,t0,p_d_f,P_f,P_A,P_T,f_bar_sum,plims_f_bar,plims_T,m_low_arr,m_high_arr) for th in theta])\nnp.savetxt('theta.txt',theta)\nnp.savetxt('y.txt',y)\ny_no_inf = np.nan_to_num(y,neginf=-600.)\ny_no_inf[y_no_inf<=-600.] = -600.\n#theta = np.loadtxt('theta.txt')\n#y_no_inf = np.nan_to_num(np.loadtxt('y.txt'),neginf=-710.)\ngp = gpUtils.defaultGP(theta, y_no_inf, white_noise=-12)\n\nm=200\nnmax=3\nbounds = [(-17,-12),(0,1)]\nalgorithm = 'bape'\nsamplerKwargs = {\"nwalkers\" : 20}\nmcmcKwargs = {\"iterations\" : 8000}\n\nap = approx.ApproxPosterior(theta=theta, y=y_no_inf, gp=gp, lnprior=kne_inf.ln_prior, lnlike=kne_inf.ln_likelihood,\n priorSample=prior_sample, algorithm=algorithm, bounds=bounds)\n\nap.run(m=m, convergenceCheck=True, estBurnin=True, nGPRestarts=3, mcmcKwargs=mcmcKwargs,\n cache=False, samplerKwargs=samplerKwargs, verbose=True, thinChains=True,\n optGPEveryN=5,args=[mlims_f_j,T_f_j,t0,p_d_f,P_f,P_A,P_T,f_bar_sum,plims_f_bar,plims_T,m_low_arr,m_high_arr])\n\nsamples = ap.sampler.get_chain(discard=ap.iburns[-1], flat=True, thin=ap.ithins[-1])\n\n# Corner plot!\nfig = corner.corner(samples, quantiles=[0.16, 0.5, 0.84], show_titles=True,\n range=[(-17,-12),(0,1)],scale_hist=True, plot_contours=True,labels=[r\"$M0$\",r\"$\\gamma$\"],title_kwargs={\"fontsize\": 14})\nplt.savefig(fname='../plots/uniform-likelihood-simplified-2f-1-0-PA0p2-22-26-26-26-and-21-27-27-27.png',format='png')\n\n\n\n\n" ]
[ [ "numpy.ones_like", "numpy.linspace", "numpy.nan_to_num", "matplotlib.pyplot.savefig", "numpy.savetxt", "numpy.random.uniform", "numpy.array" ] ]
jenna-h/dominion-data
[ "c55ca165e5bff899443dfb3f73379775beadf53e" ]
[ "dom_average.py" ]
[ "from PIL import Image\nimport numpy\nimport os\nimport requests\nimport shutil\n\nIM_WIDTH, IM_HEIGHT = 309, 496\nHORIZ_IM_WIDTH, HORIZ_IM_HEIGHT = 473, 290\nALL_CARDS_STRING = '''Black Market\tPromo\t10\nChurch\tPromo\t10\nDismantle\tPromo\t10\nEnvoy\tPromo\t10\nSauna\tPromo\t5\nAvanto\tPromo\t5\nWalled Village\tPromo\t10\nGovernor\tPromo\t10\nCaptain\tPromo\t10\nPrince\tPromo\t10\nCopper\tBase\t60\nCurse\tBase\t30\nEstate\tBase\t24\nSilver\tBase\t40\nDuchy\tBase\t12\nGold\tBase\t30\nProvince\tBase\t12\nCellar\tBase\t10\nChapel\tBase\t10\nMoat\tBase\t10\nHarbinger\tBase\t10\nMerchant\tBase\t10\nVassal\tBase\t10\nVillage\tBase\t10\nWorkshop\tBase\t10\nBureaucrat\tBase\t10\nGardens\tBase\t12\nMilitia\tBase\t10\nMoneylender\tBase\t10\nPoacher\tBase\t10\nRemodel\tBase\t10\nSmithy\tBase\t10\nThrone Room\tBase\t10\nBandit\tBase\t10\nCouncil Room\tBase\t10\nFestival\tBase\t10\nLaboratory\tBase\t10\nLibrary\tBase\t10\nMarket\tBase\t10\nMine\tBase\t10\nSentry\tBase\t10\nWitch\tBase\t10\nArtisan\tBase\t10\nCourtyard\tIntrigue\t10\nLurker\tIntrigue\t10\nPawn\tIntrigue\t10\nMasquerade\tIntrigue\t10\nShanty Town\tIntrigue\t10\nSteward\tIntrigue\t10\nSwindler\tIntrigue\t10\nWishing Well\tIntrigue\t10\nBaron\tIntrigue\t10\nBridge\tIntrigue\t10\nConspirator\tIntrigue\t10\nDiplomat\tIntrigue\t10\nIronworks\tIntrigue\t10\nMill\tIntrigue\t12\nMining Village\tIntrigue\t10\nSecret Passage\tIntrigue\t10\nCourtier\tIntrigue\t10\nDuke\tIntrigue\t12\nMinion\tIntrigue\t10\nPatrol\tIntrigue\t10\nReplace\tIntrigue\t10\nTorturer\tIntrigue\t10\nTrading Post\tIntrigue\t10\nUpgrade\tIntrigue\t10\nHarem\tIntrigue\t12\nNobles\tIntrigue\t12\nEmbargo\tSeaside\t10\nHaven\tSeaside\t10\nLighthouse\tSeaside\t10\nNative Village\tSeaside\t10\nPearl Diver\tSeaside\t10\nAmbassador\tSeaside\t10\nFishing Village\tSeaside\t10\nLookout\tSeaside\t10\nSmugglers\tSeaside\t10\nWarehouse\tSeaside\t10\nCaravan\tSeaside\t10\nCutpurse\tSeaside\t10\nIsland\tSeaside\t12\nNavigator\tSeaside\t10\nPirate Ship\tSeaside\t10\nSalvager\tSeaside\t10\nSea Hag\tSeaside\t10\nTreasure Map\tSeaside\t10\nBazaar\tSeaside\t10\nExplorer\tSeaside\t10\nGhost Ship\tSeaside\t10\nMerchant Ship\tSeaside\t10\nOutpost\tSeaside\t10\nTactician\tSeaside\t10\nTreasury\tSeaside\t10\nWharf\tSeaside\t10\nPotion\tAlchemy\t16\nTransmute\tAlchemy\t10\nVineyard\tAlchemy\t12\nHerbalist\tAlchemy\t10\nApothecary\tAlchemy\t10\nScrying Pool\tAlchemy\t10\nUniversity\tAlchemy\t10\nAlchemist\tAlchemy\t10\nFamiliar\tAlchemy\t10\nPhilosopher's Stone\tAlchemy\t10\nGolem\tAlchemy\t10\nApprentice\tAlchemy\t10\nPossession\tAlchemy\t10\nPlatinum\tProsperity\t12\nColony\tProsperity\t12\nLoan\tProsperity\t10\nTrade Route\tProsperity\t10\nWatchtower\tProsperity\t10\nBishop\tProsperity\t10\nMonument\tProsperity\t10\nQuarry\tProsperity\t10\nTalisman\tProsperity\t10\nWorker's Village\tProsperity\t10\nCity\tProsperity\t10\nContraband\tProsperity\t10\nCounting House\tProsperity\t10\nMint\tProsperity\t10\nMountebank\tProsperity\t10\nRabble\tProsperity\t10\nRoyal Seal\tProsperity\t10\nVault\tProsperity\t10\nVenture\tProsperity\t10\nGoons\tProsperity\t10\nGrand Market\tProsperity\t10\nHoard\tProsperity\t10\nBank\tProsperity\t10\nExpand\tProsperity\t10\nForge\tProsperity\t10\nKing's Court\tProsperity\t10\nPeddler\tProsperity\t10\nHamlet\tCornucopia\t10\nFortune Teller\tCornucopia\t10\nMenagerie\tCornucopia\t10\nFarming Village\tCornucopia\t10\nHorse Traders\tCornucopia\t10\nRemake\tCornucopia\t10\nTournament\tCornucopia\t10\nYoung Witch\tCornucopia\t10\nHarvest\tCornucopia\t10\nHorn of 
Plenty\tCornucopia\t10\nHunting Party\tCornucopia\t10\nJester\tCornucopia\t10\nFairgrounds\tCornucopia\t12\nBag of Gold\tCornucopia\t1\nDiadem\tCornucopia\t1\nFollowers\tCornucopia\t1\nPrincess\tCornucopia\t1\nTrusty Steed\tCornucopia\t1\nCrossroads\tHinterlands\t10\nDuchess\tHinterlands\t10\nFool's Gold\tHinterlands\t10\nDevelop\tHinterlands\t10\nOasis\tHinterlands\t10\nOracle\tHinterlands\t10\nScheme\tHinterlands\t10\nTunnel\tHinterlands\t12\nJack of All Trades\tHinterlands\t10\nNoble Brigand\tHinterlands\t10\nNomad Camp\tHinterlands\t10\nSilk Road\tHinterlands\t12\nSpice Merchant\tHinterlands\t10\nTrader\tHinterlands\t10\nCache\tHinterlands\t10\nCartographer\tHinterlands\t10\nEmbassy\tHinterlands\t10\nHaggler\tHinterlands\t10\nHighway\tHinterlands\t10\nIll-Gotten Gains\tHinterlands\t10\nInn\tHinterlands\t10\nMandarin\tHinterlands\t10\nMargrave\tHinterlands\t10\nStables\tHinterlands\t10\nBorder Village\tHinterlands\t10\nFarmland\tHinterlands\t12\nPoor House\tDark Ages\t10\nBeggar\tDark Ages\t10\nSquire\tDark Ages\t10\nVagrant\tDark Ages\t10\nForager\tDark Ages\t10\nHermit\tDark Ages\t10\nMarket Square\tDark Ages\t10\nSage\tDark Ages\t10\nStoreroom\tDark Ages\t10\nUrchin\tDark Ages\t10\nArmory\tDark Ages\t10\nDeath Cart\tDark Ages\t10\nFeodum\tDark Ages\t12\nFortress\tDark Ages\t10\nIronmonger\tDark Ages\t10\nMarauder\tDark Ages\t10\nProcession\tDark Ages\t10\nRats\tDark Ages\t20\nScavenger\tDark Ages\t10\nWandering Minstrel\tDark Ages\t10\nBand of Misfits\tDark Ages\t10\nBandit Camp\tDark Ages\t10\nCatacombs\tDark Ages\t10\nCount\tDark Ages\t10\nCounterfeit\tDark Ages\t10\nCultist\tDark Ages\t10\nGraverobber\tDark Ages\t10\nJunk Dealer\tDark Ages\t10\nMystic\tDark Ages\t10\nPillage\tDark Ages\t10\nRebuild\tDark Ages\t10\nRogue\tDark Ages\t10\nAltar\tDark Ages\t10\nHunting Grounds\tDark Ages\t2\nAbandoned Mine\tDark Ages\t2\nRuined Library\tDark Ages\t2\nRuined Market\tDark Ages\t2\nRuined Village\tDark Ages\t2\nSurvivors\tDark Ages\t2\nHovel\tDark Ages\t6\nNecropolis\tDark Ages\t6\nOvergrown Estate\tDark Ages\t6\nMadman\tDark Ages\t10\nMercenary\tDark Ages\t10\nSpoils\tDark Ages\t15\nDame Anna\tDark Ages\t1\nDame Josephine\tDark Ages\t1\nDame Molly\tDark Ages\t1\nDame Natalie\tDark Ages\t1\nDame Sylvia\tDark Ages\t1\nSir Bailey\tDark Ages\t1\nSir Destry\tDark Ages\t1\nSir Martin\tDark Ages\t1\nSir Michael\tDark Ages\t1\nSir Vander\tDark Ages\t1\nCandlestick Maker\tGuilds\t10\nStonemason\tGuilds\t10\nDoctor\tGuilds\t10\nMasterpiece\tGuilds\t10\nAdvisor\tGuilds\t10\nPlaza\tGuilds\t10\nTaxman\tGuilds\t10\nHerald\tGuilds\t10\nBaker\tGuilds\t10\nButcher\tGuilds\t10\nJourneyman\tGuilds\t10\nMerchant Guild\tGuilds\t10\nSoothsayer\tGuilds\t10\nPort\tAdventures\t10\nCoin of the Realm\tAdventures\t10\nRatcatcher\tAdventures\t10\nRaze\tAdventures\t10\nAmulet\tAdventures\t10\nCaravan Guard\tAdventures\t10\nDungeon\tAdventures\t10\nGear\tAdventures\t10\nGuide\tAdventures\t10\nDuplicate\tAdventures\t10\nMagpie\tAdventures\t10\nMessenger\tAdventures\t10\nMiser\tAdventures\t10\nRanger\tAdventures\t10\nTransmogrify\tAdventures\t10\nArtificer\tAdventures\t10\nBridge Troll\tAdventures\t10\nDistant Lands\tAdventures\t12\nGiant\tAdventures\t10\nHaunted Woods\tAdventures\t10\nLost City\tAdventures\t10\nRelic\tAdventures\t10\nRoyal Carriage\tAdventures\t10\nStoryteller\tAdventures\t10\nSwamp Hag\tAdventures\t10\nTreasure Trove\tAdventures\t10\nWine Merchant\tAdventures\t10\nHireling\tAdventures\t10\nPage\tAdventures\t10\nTreasure 
Hunter\tAdventures\t5\nWarrior\tAdventures\t5\nHero\tAdventures\t5\nChampion\tAdventures\t5\nPeasant\tAdventures\t10\nSoldier\tAdventures\t5\nFugitive\tAdventures\t5\nDisciple\tAdventures\t5\nTeacher\tAdventures\t5\nEngineer\tEmpires\t10\nCity Quarter\tEmpires\t10\nOverlord\tEmpires\t10\nRoyal Blacksmith\tEmpires\t10\nEncampment\tEmpires\t5\nPlunder\tEmpires\t5\nPatrician\tEmpires\t5\nEmporium\tEmpires\t5\nSettlers\tEmpires\t5\nBustling Village\tEmpires\t5\nCatapult\tEmpires\t5\nRocks\tEmpires\t5\nChariot Race\tEmpires\t10\nEnchantress\tEmpires\t10\nFarmers' Market\tEmpires\t10\nGladiator\tEmpires\t5\nFortune\tEmpires\t5\nSacrifice\tEmpires\t10\nTemple\tEmpires\t10\nVilla\tEmpires\t10\nArchive\tEmpires\t10\nCapital\tEmpires\t10\nCharm\tEmpires\t10\nCrown\tEmpires\t10\nForum\tEmpires\t10\nGroundskeeper\tEmpires\t10\nLegionary\tEmpires\t10\nWild Hunt\tEmpires\t10\nHumble Castle\tEmpires\t1\nCrumbling Castle\tEmpires\t1\nSmall Castle\tEmpires\t1\nHaunted Castle\tEmpires\t1\nOpulent Castle\tEmpires\t1\nSprawling Castle\tEmpires\t1\nGrand Castle\tEmpires\t1\nKing's Castle\tEmpires\t1\nDruid\tNocturne\t10\nFaithful Hound\tNocturne\t10\nGuardian\tNocturne\t10\nMonastery\tNocturne\t10\nPixie\tNocturne\t10\nTracker\tNocturne\t10\nChangeling\tNocturne\t10\nFool\tNocturne\t10\nGhost Town\tNocturne\t10\nLeprechaun\tNocturne\t10\nNight Watchman\tNocturne\t10\nSecret Cave\tNocturne\t10\nBard\tNocturne\t10\nBlessed Village\tNocturne\t10\nCemetery\tNocturne\t12\nConclave\tNocturne\t10\nDevil's Workshop\tNocturne\t10\nExorcist\tNocturne\t10\nNecromancer\tNocturne\t10\nShepherd\tNocturne\t10\nSkulk\tNocturne\t10\nCobbler\tNocturne\t10\nCrypt\tNocturne\t10\nCursed Village\tNocturne\t10\nDen of Sin\tNocturne\t10\nIdol\tNocturne\t10\nPooka\tNocturne\t10\nSacred Grove\tNocturne\t10\nTormentor\tNocturne\t10\nTragic Hero\tNocturne\t10\nVampire\tNocturne\t10\nWerewolf\tNocturne\t10\nRaider\tNocturne\t10\nHaunted Mirror\tNocturne\t6\nMagic Lamp\tNocturne\t6\nGoat\tNocturne\t6\nPasture\tNocturne\t6\nPouch\tNocturne\t6\nCursed Gold\tNocturne\t6\nLucky Coin\tNocturne\t6\nWill-o'-Wisp\tNocturne\t12\nWish\tNocturne\t12\nBat\tNocturne\t10\nImp\tNocturne\t13\nZombie Apprentice\tNocturne\t1\nZombie Mason\tNocturne\t1\nZombie Spy\tNocturne\t1\nGhost\tNocturne\t6\nBorder Guard\tRenaissance\t10\nDucat\tRenaissance\t10\nLackeys\tRenaissance\t10\nActing Troupe\tRenaissance\t10\nCargo Ship\tRenaissance\t10\nExperiment\tRenaissance\t10\nImprove\tRenaissance\t10\nFlag Bearer\tRenaissance\t10\nHideout\tRenaissance\t10\nInventor\tRenaissance\t10\nMountain Village\tRenaissance\t10\nPatron\tRenaissance\t10\nPriest\tRenaissance\t10\nResearch\tRenaissance\t10\nSilk Merchant\tRenaissance\t10\nOld Witch\tRenaissance\t10\nRecruiter\tRenaissance\t10\nScepter\tRenaissance\t10\nScholar\tRenaissance\t10\nSculptor\tRenaissance\t10\nSeer\tRenaissance\t10\nSpices\tRenaissance\t10\nSwashbuckler\tRenaissance\t10\nTreasurer\tRenaissance\t10\nVillain\tRenaissance\t10\nBlack Cat\tMenagerie\t10\nSleigh\tMenagerie\t10\nSupplies\tMenagerie\t10\nCamel Train\tMenagerie\t10\nGoatherd\tMenagerie\t10\nScrap\tMenagerie\t10\nSheepdog\tMenagerie\t10\nSnowy Village\tMenagerie\t10\nStockpile\tMenagerie\t10\nBounty Hunter\tMenagerie\t10\nCardinal\tMenagerie\t10\nCavalry\tMenagerie\t10\nGroom\tMenagerie\t10\nHostelry\tMenagerie\t10\nVillage Green\tMenagerie\t10\nBarge\tMenagerie\t10\nCoven\tMenagerie\t10\nDisplace\tMenagerie\t10\nFalconer\tMenagerie\t10\nGatekeeper\tMenagerie\t10\nHunting 
Lodge\tMenagerie\t10\nKiln\tMenagerie\t10\nLivery\tMenagerie\t10\nMastermind\tMenagerie\t10\nPaddock\tMenagerie\t10\nSanctuary\tMenagerie\t10\nFisherman\tMenagerie\t10\nDestrier\tMenagerie\t10\nWayfarer\tMenagerie\t10\nAnimal Fair\tMenagerie\t10\nHorse\tMenagerie\t30'''\n\nHORIZONTAL_ALL_CARDS_STRING = '''Alms\tAdventures\nBorrow\tAdventures\nQuest\tAdventures\nSave\tAdventures\nScouting Party\tAdventures\nTravelling Fair\tAdventures\nBonfire\tAdventures\nExpedition\tAdventures\nFerry\tAdventures\nPlan\tAdventures\nMission\tAdventures\nPilgrimage\tAdventures\nBall\tAdventures\nRaid\tAdventures\nSeaway\tAdventures\nTrade\tAdventures\nLost Arts\tAdventures\nTraining\tAdventures\nInheritance\tAdventures\nPathfinding\tAdventures\nTriumph\tEmpires\nAnnex\tEmpires\nDonate\tEmpires\nAdvance\tEmpires\nDelve\tEmpires\nTax\tEmpires\nBanquet\tEmpires\nRitual\tEmpires\nSalt the Earth\tEmpires\nWedding\tEmpires\nWindfall\tEmpires\nConquest\tEmpires\nDominate\tEmpires\nAqueduct\tEmpires\nArena\tEmpires\nBandit Fort\tEmpires\nBasilica\tEmpires\nBaths\tEmpires\nBattlefield\tEmpires\nColonnade\tEmpires\nDefiled Shrine\tEmpires\nFountain\tEmpires\nKeep\tEmpires\nLabyrinth\tEmpires\nMountain Pass\tEmpires\nMuseum\tEmpires\nObelisk\tEmpires\nOrchard\tEmpires\nPalace\tEmpires\nTomb\tEmpires\nTower\tEmpires\nTriumphal Arch\tEmpires\nWall\tEmpires\nWolf Den\tEmpires\nThe Earth's Gift\tNocturne\nThe Field's Gift\tNocturne\nThe Flame's Gift\tNocturne\nThe Forest's Gift\tNocturne\nThe Moon's Gift\tNocturne\nThe Mountain's Gift\tNocturne\nThe River's Gift\tNocturne\nThe Sea's Gift\tNocturne\nThe Sky's Gift\tNocturne\nThe Sun's Gift\tNocturne\nThe Swamp's Gift\tNocturne\nThe Wind's Gift\tNocturne\nBad Omens\tNocturne\nDelusion\tNocturne\nEnvy\tNocturne\nFamine\tNocturne\nFear\tNocturne\nGreed\tNocturne\nHaunting\tNocturne\nLocusts\tNocturne\nMisery\tNocturne\nPlague\tNocturne\nPoverty\tNocturne\nWar\tNocturne\nCathedral\tRenaissance\nCity Gate\tRenaissance\nPageant\tRenaissance\nSewers\tRenaissance\nStar Chart\tRenaissance\nExploration\tRenaissance\nFair\tRenaissance\nSilos\tRenaissance\nSinister Plot\tRenaissance\nAcademy\tRenaissance\nCapitalism\tRenaissance\nFleet\tRenaissance\nGuildhall\tRenaissance\nPiazza\tRenaissance\nRoad Network\tRenaissance\nBarracks\tRenaissance\nCrop Rotation\tRenaissance\nInnovation\tRenaissance\nCanal\tRenaissance\nCitadel\tRenaissance\nDelay\tMenagerie\nDesperation\tMenagerie\nGamble\tMenagerie\nPursue\tMenagerie\nRide\tMenagerie\nToil\tMenagerie\nEnhance\tMenagerie\nMarch\tMenagerie\nTransport\tMenagerie\nBanish\tMenagerie\nBargain\tMenagerie\nInvest\tMenagerie\nSeize the Day\tMenagerie\nCommerce\tMenagerie\nDemand\tMenagerie\nStampede\tMenagerie\nReap\tMenagerie\nEnclave\tMenagerie\nAlliance\tMenagerie\nPopulate\tMenagerie\nWay of the Butterfly\tMenagerie\nWay of the Camel\tMenagerie\nWay of the Chameleon\tMenagerie\nWay of the Frog\tMenagerie\nWay of the Goat\tMenagerie\nWay of the Horse\tMenagerie\nWay of the Mole\tMenagerie\nWay of the Monkey\tMenagerie\nWay of the Mouse\tMenagerie\nWay of the Mule\tMenagerie\nWay of the Otter\tMenagerie\nWay of the Owl\tMenagerie\nWay of the Ox\tMenagerie\nWay of the Pig\tMenagerie\nWay of the Rat\tMenagerie\nWay of the Seal\tMenagerie\nWay of the Sheep\tMenagerie\nWay of the Squirrel\tMenagerie\nWay of the Turtle\tMenagerie\nWay of the Worm\tMenagerie\nSummon\tPromo'''\n\n# Obtain card images via\n# wget -r -np http://wiki.dominionstrategy.com/images/\n# (You can probably do something smart with filtering ('-R'), but I'm dumb and 
didn't)\n\nis_card_used = {}\nfor l in HORIZONTAL_ALL_CARDS_STRING.split('\\n'):\n name, expansion = l.split('\\t')\n is_card_used[name] = False\n\ncard_data = []\nfor l in ALL_CARDS_STRING.split('\\n'):\n name, expansion, num = l.split('\\t')\n card_data.append((name, expansion, int(num)))\n\nhorizontal_card_data = []\nfor l in HORIZONTAL_ALL_CARDS_STRING.split('\\n'):\n horizontal_card_data.append(tuple(l.split('\\t')))\n \ndef crawl(root_dir, dest_dir):\n for root, dirs, files in os.walk(root_dir):\n for name in files:\n try:\n no_suffix, suffix = name.split('.')\n if suffix not in ('jpg', 'png'):\n continue\n for card_name in is_card_used:\n formatted_name = card_name.replace(' ', '_')\n if formatted_name+'Digital' == no_suffix:\n shutil.copy(os.path.join(root, name), os.path.join(dest_dir, name))\n is_card_used[card_name] = True\n except Exception as e:\n print(name, 'failed because', e)\n for card_name, used in is_card_used.items():\n if not used:\n print(card_name, 'was not used')\n\ndef calculate_sizes(directory):\n min_width, max_width = float('inf'), 0\n min_height, max_height = float('inf'), 0\n for filename in os.listdir(directory):\n try:\n im = Image.open(os.path.join(directory, filename))\n width, height = im.size\n min_width, max_width = min(min_width, width), max(max_width, width)\n min_height, max_height = min(min_height, height), max(max_height, height)\n except: # not an image file\n pass\n print(min_width, max_width, min_height, max_height)\n # prints \n\ndef canonicalize_images(inputs, outputs):\n for root, dirs, files in os.walk(inputs):\n for name in files:\n try:\n im = Image.open(os.path.join(inputs, name))\n new_file_name = os.path.join(outputs, name.replace('Digital', ''))\n im.resize((IM_WIDTH, IM_HEIGHT)).save(new_file_name)\n except Exception as e:\n print(name, 'failed because', e)\n\ndef calculate_average(canon_images, expansion, weighted, output):\n total = 0\n res = numpy.zeros((IM_HEIGHT, IM_WIDTH, 3), dtype='float')\n for name, exp, count in card_data:\n if expansion == 'All' or exp == expansion:\n im = Image.open(os.path.join(canon_images, name.replace(' ', '_')+'.jpg'))\n numpy_im = numpy.array(im, dtype='float')\n if weighted:\n res += numpy_im*count\n total += count\n else:\n res += numpy_im\n total += 1\n if total != 0:\n res /= total\n Image.fromarray(numpy.round(res).astype('uint8'), 'RGB').save(os.path.join(\n output, expansion+('weighted' if weighted else 'unweighted')+'.jpg'))\n\n \n\ncrawl('/Users/jennahimawan/Downloads/big-ims', '/Users/jennahimawan/Downloads/vertical')\ncalculate_sizes('/Users/jennahimawan/Downloads/vertical')\ncanonicalize_images('/Users/jennahimawan/Downloads/vertical', '/Users/jennahimawan/Downloads/vertical-canon')\n\nfor exp in ('Adventures', 'Alchemy', 'All', 'Base', 'Cornucopia', 'Dark Ages', 'Empires',\n 'Guilds', 'Hinterlands', 'Intrigue', 'Menagerie', 'Nocturne', 'Promo', 'Prosperity',\n 'Renaissance', 'Seaside'):\n for weighted in (True, False):\n calculate_average('/Users/jennahimawan/Downloads/vertical-canon',\n exp, weighted, '/Users/jennahimawan/Downloads/outputs')\n\n\ncrawl('/Users/jennahimawan/Downloads/big-ims', '/Users/jennahimawan/Downloads/horizontal')\ncalculate_sizes('/Users/jennahimawan/Downloads/horizontal')\ncanonicalize_images('/Users/jennahimawan/Downloads/horizontal', '/Users/jennahimawan/Downloads/horizontal-canon')\n\nfor exp in ('Adventures', 'Alchemy', 'All', 'Base', 'Cornucopia', 'Dark Ages', 'Empires',\n 'Guilds', 'Hinterlands', 'Intrigue', 'Menagerie', 'Nocturne', 'Promo', 
'Prosperity',\n 'Renaissance', 'Seaside'):\n calculate_average('/Users/jennahimawan/Downloads/horizontal-canon',\n exp, False, '/Users/jennahimawan/Downloads/horizontal-outputs')\n" ]
[ [ "numpy.round", "numpy.array", "numpy.zeros" ] ]
SunnyxBd/feature_engine
[ "e40457ce8b4baa1e146976bf1af1bbdf6eae1305" ]
[ "tests/test_transformation/test_log_transformer.py" ]
[ "import pandas as pd\nimport pytest\n\nfrom sklearn.exceptions import NotFittedError\n\nfrom feature_engine.transformation import LogTransformer\n\n\ndef test_log_base_e_plus_automatically_find_variables(df_vartypes):\n # test case 1: log base e, automatically select variables\n transformer = LogTransformer(base=\"e\", variables=None)\n X = transformer.fit_transform(df_vartypes)\n\n # expected output\n transf_df = df_vartypes.copy()\n transf_df[\"Age\"] = [2.99573, 3.04452, 2.94444, 2.89037]\n transf_df[\"Marks\"] = [-0.105361, -0.223144, -0.356675, -0.510826]\n\n # test init params\n assert transformer.base == \"e\"\n assert transformer.variables == [\"Age\", \"Marks\"]\n # test fit attr\n assert transformer.input_shape_ == (4, 5)\n # test transform output\n pd.testing.assert_frame_equal(X, transf_df)\n\n\ndef test_log_base_10_plus_user_passes_var_list(df_vartypes):\n # test case 2: log base 10, user passes variables\n transformer = LogTransformer(base=\"10\", variables=\"Age\")\n X = transformer.fit_transform(df_vartypes)\n\n # expected output\n transf_df = df_vartypes.copy()\n transf_df[\"Age\"] = [1.30103, 1.32222, 1.27875, 1.25527]\n\n # test init params\n assert transformer.base == \"10\"\n assert transformer.variables == [\"Age\"]\n # test fit attr\n assert transformer.input_shape_ == (4, 5)\n # test transform output\n pd.testing.assert_frame_equal(X, transf_df)\n\n\ndef test_error_if_base_value_not_allowed():\n with pytest.raises(ValueError):\n LogTransformer(base=\"other\")\n\n\ndef test_fit_raises_error_if_na_in_df(df_na):\n # test case 3: when dataset contains na, fit method\n with pytest.raises(ValueError):\n transformer = LogTransformer()\n transformer.fit(df_na)\n\n\ndef test_transform_raises_error_if_na_in_df(df_vartypes, df_na):\n # test case 4: when dataset contains na, transform method\n with pytest.raises(ValueError):\n transformer = LogTransformer()\n transformer.fit(df_vartypes)\n transformer.transform(df_na[[\"Name\", \"City\", \"Age\", \"Marks\", \"dob\"]])\n\n\ndef test_error_if_df_contains_negative_values(df_vartypes):\n # test error when data contains negative values\n df_neg = df_vartypes.copy()\n df_neg.loc[1, \"Age\"] = -1\n\n # test case 5: when variable contains negative value, fit\n with pytest.raises(ValueError):\n transformer = LogTransformer()\n transformer.fit(df_neg)\n\n # test case 6: when variable contains negative value, transform\n with pytest.raises(ValueError):\n transformer = LogTransformer()\n transformer.fit(df_vartypes)\n transformer.transform(df_neg)\n\n\ndef test_non_fitted_error(df_vartypes):\n with pytest.raises(NotFittedError):\n transformer = LogTransformer()\n transformer.transform(df_vartypes)\n" ]
[ [ "pandas.testing.assert_frame_equal" ] ]
pasta41/decameron
[ "9b9607077d5a36d231187fc421f8f1474f61b9fa" ]
[ "scripts/plot.py" ]
[ "from collections import defaultdict\nfrom datetime import datetime\nimport math\nfrom operator import itemgetter\nimport os\nimport random\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport little_mallet_wrapper as lmw\nimport pdb\nimport seaborn as sns\n\nimport matplotlib.pyplot as plt\n\nimport constants\n\nroot = \"/home/cooper/src/decameron/output_cached/\"\noutput_path_topic_keys = root + \"mallet.topic_keys.10\"\noutput_path_topic_distributions = root + \"mallet.topic_distributions.10\"\n\ntopic_keys = lmw.load_topic_keys(output_path_topic_keys)\ntopic_distributions = lmw.load_topic_distributions(output_path_topic_distributions)\n\ntruncated_topics = [', '.join(x[:5]) for x in topic_keys]\ntruncated_topics_edited = truncated_topics[:5] + truncated_topics[6:8] + truncated_topics[9:]\ntopics_edited_names = ['seafaring', 'peasants/farming', 'knights/nobility', \n\t'courts/laws', 'merchants/economy', 'courtesy', 'religion/sin', 'love/vengeance']\n\ndecameron_path = \"/home/cooper/src/decameron/data/csv/decameron.csv\"\n\ndef group_wordcounts_by_attr(combined_df, attr):\n\twordcounts = combined_df.groupby([attr])[['{}_unnorm'.format(i) for i in range(10)]].sum()\n\tfor i in range(10):\n\t\twordcounts['{}_norm'.format(i)] = wordcounts['{}_unnorm'.format(i)] / combined_df.groupby([attr])['Word Count'].sum()\n\treturn wordcounts\n\ndef plot_heatmap(wordcounts, out_file_name, vmax, color):\n\tf, ax = plt.subplots(figsize=[10,10])\n\tsns.heatmap(wordcounts[['{}_norm'.format(i) for i in range(10)]].drop(\"8_norm\", axis=1).drop(\"5_norm\", axis=1), \n\t\tvmax=vmax, cmap=sns.light_palette(color, as_cmap=True), \n\t\tannot=True, fmt='.3f', cbar=False)\n\tsns.set_context('talk')\n\n\tplt.xticks(np.arange(8) + .5, topics_edited_names, rotation=45, ha=\"right\")\n\tplt.tight_layout()\n\tplt.savefig(\"/home/cooper/src/decameron/fig/\" + out_file_name)\n\ndef plot_heatmap_big(wordcounts, out_file_name, vmax, color):\n\tf, ax = plt.subplots(figsize=[12,30])\n\tsns.heatmap(wordcounts[['{}_norm'.format(i) for i in range(10)]].drop(\"8_norm\", axis=1).drop(\"5_norm\", axis=1), \n\t\tvmax=vmax, cmap=sns.light_palette(color, as_cmap=True), \n\t\tannot=True, fmt='.2f', cbar=False)\n\tsns.set_context('talk')\n\n\tplt.xticks(np.arange(8) + .5, topics_edited_names, rotation=45, ha=\"right\")\n\tplt.tight_layout()\n\tplt.savefig(\"/home/cooper/src/decameron/fig/\" + out_file_name)\n\ndef plot_heatmap_rectangle(wordcounts, out_file_name, vmax, color):\n\tf, ax = plt.subplots(figsize=[10,5])\n\tsns.heatmap(wordcounts[['{}_norm'.format(i) for i in range(10)]].drop(\"8_norm\", axis=1).drop(\"5_norm\", axis=1), \n\t\tvmax=vmax, cmap=sns.light_palette(color, as_cmap=True), \n\t\tannot=True, fmt='.3f', cbar=False)\n\tsns.set_context('talk')\n\n\tplt.xticks(np.arange(8) + .5, topics_edited_names, rotation=45, ha=\"right\")\n\tplt.tight_layout()\n\tplt.savefig(\"/home/cooper/src/decameron/fig/\" + out_file_name)\n\ndef narrator_topics_barplot(narrator_wordcounts, narrator):\n\tf, ax = plt.subplots(figsize=[8,8])\n\tnarrator_wordcounts_edited = narrator_wordcounts.drop(\"8_norm\", axis=1).drop(\"5_norm\", axis=1)\n\tcols = [0, 1, 2, 3, 4, 6, 7, 9]\n\tnarrator_wordcounts_edited.loc[narrator][['{}_norm'.format(i) for i in cols]].plot(kind=\"bar\")\n\tplt.title(\"Topic Proportions for \" + narrator.capitalize())\n\tax.set_xticklabels(topics_edited_names, rotation=45, ha='right')\n\tplt.tight_layout()\n\t#plt.show()\n\tplt.savefig(\"/home/cooper/src/decameron/fig/\" + narrator + \".pdf\")\n\ndecameron_df = 
pd.read_csv(decameron_path)\n\ntraining_data = [lmw.process_string(t, \n\tstop_words=constants.stop_words) for t in decameron_df[decameron_df['Day']==1]['Text'].values]\ntraining_data = [d for d in training_data if d.strip()]\n\n#word_count_per_story = [len(d.split(' ')) for d in training_data]\ndecameron_df['Word Count'] = decameron_df['Text'].str.split(' ').apply(lambda x: len(x))\n\n# Compute unnormalized topic breakdown, based on per-story wordcount\ncombined_df = pd.concat([decameron_df, pd.DataFrame(topic_distributions)], axis=1)\nfor i in range(10):\n combined_df['{}_unnorm'.format(i)] = combined_df[i] * combined_df['Word Count']\n\nstory_wordcounts = group_wordcounts_by_attr(combined_df, 'ID')\nplot_heatmap_big(story_wordcounts, \"story_topics\", .15, \"#be0119\")\n\n# Analysis to give topic distributions by Day\nday_wordcounts = group_wordcounts_by_attr(combined_df, 'Day')\nplot_heatmap(day_wordcounts, \"day_topics.pdf\", .09, \"#be0119\")\n\n# Analysis to give topic distributions by Narrator\nnarrator_wordcounts = group_wordcounts_by_attr(combined_df, 'Narrator')\nplot_heatmap(narrator_wordcounts, \"narrator_topics.pdf\", .08, \"#0b2a63\")\n\n# Analysis to give topic distributions by Gender of narrator\ngender_wordcounts = group_wordcounts_by_attr(combined_df, 'Gender')\nplot_heatmap_rectangle(gender_wordcounts, \"gender_topics.pdf\", .05, \"#0e5e24\")\n\n# Analysis to give bar plot for each narrator\nfor narrator in constants.narrators:\n\tnarrator_topics_barplot(narrator_wordcounts, narrator)\n" ]
[ [ "matplotlib.pyplot.tight_layout", "pandas.read_csv", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "pandas.DataFrame" ] ]
pdales/FTeikPy
[ "cc187d3beb658d02a7dfc1a78c7c54f507d525a2" ]
[ "fteikpy/bspline_model.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Keurfon Luu <[email protected]>\nLicense: MIT\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import RegularGridInterpolator\nfrom ._bspline import bspline as bspl\n\n__all__ = [ \"bspline1\", \"bspline2\", \"vel2spl\", \"spl2vel\" ]\n\n\ndef bspline1(x, y, xq, order = 4, check = True):\n \"\"\"\n 1-D B-Spline approximation.\n \n Parameters\n ----------\n x : ndarray or list\n 1-D array of real values.\n y : ndarray or list\n 1-D array of real values. The length of y along the interpolation axis\n must be equal to the length of x.\n xq : ndarray of list\n 1-D array of real values to query.\n order : int, default 4\n Order of spline. Order should be less than the number of control\n points.\n check : bool\n Check inputs consistency. Disable checking if you need to call\n 'bspline1' a lot of times for better performance.\n \n Returns\n -------\n yq : ndarray of list\n 1-D array of real values at query points.\n \"\"\"\n # Check inputs\n if check:\n if not isinstance(x, (np.ndarray, list)) and not np.asarray(x).ndim != 1:\n raise ValueError(\"x must be a 1-D ndarray\")\n if not isinstance(y, (np.ndarray, list)) and not np.asarray(y).ndim != 1:\n raise ValueError(\"y must be a 1-D ndarray\")\n if len(x) != len(y):\n raise ValueError(\"x and y must have the same length\")\n if not isinstance(xq, (np.ndarray, list)) and not np.asarray(xq).ndim != 1:\n raise ValueError(\"xq must be a 1-D ndarray\")\n if xq[0] < x[0] or xq[-1] > x[-1]:\n raise ValueError(\"elements in xq should be within x.min and x.max\")\n if not isinstance(order, int) or not 2 <= order <= len(x):\n raise ValueError(\"order should be an integer in [ %d, %d ], got %d\" % (2, len(x), order))\n \n # 1-D B-Spline approximation\n return bspl.bspline1(x, y, xq, order = order)\n\n\ndef bspline2(x, y, z, xq, yq, order = 4, n_threads = 1, check = True):\n \"\"\"\n 2-D B-Spline approximation.\n \n Parameters\n ----------\n x, y : ndarray\n 1-D array of real values.\n z : ndarray\n 2-D array of real values. The shape of z along the interpolation\n axis must be consistent with the lengths of x and y.\n xq, yq : ndarray\n 1-D or 2-D array of real values to query.\n order : int, default 4\n Order of spline. Order should be less than the number of control\n points in both dimensions.\n n_threads : int, default 1\n Number of threads to pass to OpenMP.\n check : bool\n Check inputs consistency. 
Disable checking if you need to call\n 'bspline2' a lot of times for better performance.\n \n Returns\n -------\n zq : ndarray\n 2-D array of real values at query points.\n \"\"\"\n # Check inputs\n if check:\n if not isinstance(x, (np.ndarray, list)) and not np.asarray(x).ndim != 1:\n raise ValueError(\"x must be a 1-D ndarray\")\n if not isinstance(y, (np.ndarray, list)) and not np.asarray(y).ndim != 1:\n raise ValueError(\"y must be a 1-D ndarray\")\n if not isinstance(z, np.ndarray) and z.ndim != 2:\n raise ValueError(\"z must be a 2-D ndarray\")\n if z.shape != ( len(y), len(x) ):\n raise ValueError(\"z must be of shape (%d, %d), got %s\" \\\n % (len(y), len(x), z.shape))\n if not isinstance(xq, (np.ndarray, list)) and not np.asarray(xq).ndim not in [ 1, 2 ]:\n raise ValueError(\"xq must be a 1-D or 2-D ndarray\")\n if not isinstance(yq, (np.ndarray, list)) and not np.asarray(yq).ndim not in [ 1, 2 ]:\n raise ValueError(\"yq must be a 1-D or 2-D ndarray\")\n if np.asarray(xq).ndim != np.asarray(yq).ndim:\n raise ValueError(\"inconsistent ndim for xq and yq\")\n if np.asarray(xq).ndim == 1:\n if xq[0] < x[0] or xq[-1] > x[-1]:\n raise ValueError(\"elements in xq should be within x.min and x.max\")\n if yq[0] < y[0] or yq[-1] > y[-1]:\n raise ValueError(\"elements in yq should be within y.min and y.max\")\n elif np.asarray(xq).ndim == 2:\n if xq.shape != yq.shape:\n raise ValueError(\"xq and yq must have the same shape\")\n if not 2 <= order <= min(len(x), len(y)):\n raise ValueError(\"order should be in [ %d, %d ], got %d\" % (2, min(len(x), len(y)), order))\n if not isinstance(order, int) or not isinstance(n_threads, int) or n_threads < 1:\n raise ValueError(\"n_threads must be at least 1, got %s\" % n_threads)\n \n # 2-D B-Spline approximation\n if np.asarray(xq).ndim == 1:\n XQ, YQ = np.meshgrid(xq, yq)\n return bspl.bspline2(x, y, z, XQ, YQ, order = order, n_threads = n_threads)\n elif np.asarray(xq).ndim == 2:\n return bspl.bspline2(x, y, z, xq, yq, order = order, n_threads = n_threads)\n\n\ndef vel2spl(velocity_model, spl_shape, order = 4, n_threads = 1):\n \"\"\"\n Smooth a velocity model by undersampling and interpolating using B-splines.\n \n Parameters\n ----------\n velocity_model : ndarray of shape (nz, nx)\n Velocity model grid in m/s.\n spl_shape : tuple (nz_spl, nx_spl)\n B-splines grid shape.\n order : int, default 4\n Order of spline. 
Order should be less than the number of control points\n in both dimensions.\n n_threads : int, default 1\n Number of threads to pass to OpenMP.\n \n Returns\n -------\n spl : ndarray of shape (nz, nx)\n Smoothed velocity model grid in m/s.\n \n Note\n ----\n Currently only available in 2-D.\n \"\"\"\n # Check inputs\n if not isinstance(velocity_model, np.ndarray) and velocity_model.ndim != 2:\n raise ValueError(\"velocity_model must be a 2-D ndarray\")\n if np.any(np.array(velocity_model.shape) < 4):\n raise ValueError(\"velocity_model grid shape must be at least 4\")\n if not isinstance(spl_shape, (list, tuple, np.ndarray)):\n raise ValueError(\"spl_shape must be a list, tuple or ndarray\")\n if len(spl_shape) != velocity_model.ndim:\n raise ValueError(\"spl_shape should be of length %d, got %d\" \\\n % (velocity_model.ndim, len(spl_shape)))\n if np.any(np.array(spl_shape) <= 3) or not np.all([ isinstance(i, int) for i in spl_shape ]):\n raise ValueError(\"elements in spl_shape must be integers greater than 3\")\n if not 2 <= order <= np.min(spl_shape):\n raise ValueError(\"order should be in [ %d, %d ], got %d\" % (2, np.min(spl_shape), order))\n if not isinstance(order, int) or not isinstance(n_threads, int) or n_threads < 1:\n raise ValueError(\"n_threads must be atleast 1, got %s\" % n_threads)\n \n # Extract control points\n nz, nx = velocity_model.shape\n nz_spl, nx_spl = spl_shape\n zaxis = np.arange(nz)\n xaxis = np.arange(nx)\n zaxis_spl = np.linspace(0, nz-1, nz_spl)\n xaxis_spl = np.linspace(0, nx-1, nx_spl)\n fn = RegularGridInterpolator((zaxis, xaxis), velocity_model)\n Z, X = np.meshgrid(zaxis_spl, xaxis_spl, indexing = \"ij\")\n spl = fn([ [ z, x ] for z, x in zip(Z.ravel(), X.ravel()) ]).reshape(spl_shape)\n \n # B-spline interpolation\n return bspline2(xaxis_spl, zaxis_spl, spl, xaxis, zaxis, order, n_threads)\n\n\ndef spl2vel(spl, grid_shape, order = 4, n_threads = 1):\n \"\"\"\n Interpolate a velocity model described by spline nodes using B-splines.\n \n Parameters\n ----------\n spl : ndarray of shape (nz_spl, nx_spl)\n Velocity model spline nodes in m/s.\n grid_shape : tuple (nz, nx)\n Output velocity model grid shape.\n order : int, default 4\n Order of spline. 
Order should be less than the number of control points\n in both dimensions.\n n_threads : int, default 1\n Number of threads to pass to OpenMP.\n \n Returns\n -------\n velocity_model : ndarray of shape (nz, nx)\n Velocity model grid in m/s.\n \n Note\n ----\n Currently only available in 2-D.\n \"\"\"\n # Check inputs\n if not isinstance(spl, np.ndarray) and spl.ndim != 2:\n raise ValueError(\"spl must be a 2-D ndarray\")\n if np.any(np.array(spl.shape) < 4):\n raise ValueError(\"spl grid shape must be at least 4\")\n if not isinstance(grid_shape, (list, tuple, np.ndarray)):\n raise ValueError(\"grid_shape must be a list, tuple or ndarray\")\n if len(grid_shape) != spl.ndim:\n raise ValueError(\"grid_shape should be of length %d, got %d\" \\\n % (spl.ndim, len(grid_shape)))\n if np.any(np.array(grid_shape) <= 3) or not np.all([ isinstance(i, int) for i in grid_shape ]):\n raise ValueError(\"elements in grid_shape must be integers greater than 3\")\n if not 2 <= order <= np.min(spl.shape):\n raise ValueError(\"order should be in [ %d, %d ], got %d\" % (2, np.min(spl.shape), order))\n if not isinstance(order, int) or not isinstance(n_threads, int) or n_threads < 1:\n raise ValueError(\"n_threads must be atleast 1, got %s\" % n_threads)\n \n # B-spline interpolation\n nz_spl, nx_spl = spl.shape\n nz, nx = grid_shape\n zaxis = np.arange(nz)\n xaxis = np.arange(nx)\n zaxis_spl = np.linspace(0, nz-1, nz_spl)\n xaxis_spl = np.linspace(0, nx-1, nx_spl)\n return bspline2(xaxis_spl, zaxis_spl, spl, xaxis, zaxis, order, n_threads)" ]
[ [ "numpy.linspace", "numpy.min", "numpy.asarray", "numpy.arange", "scipy.interpolate.RegularGridInterpolator", "numpy.array", "numpy.meshgrid" ] ]
iamalex5156/Vehicle-Indicator-Toolset
[ "cfc46f408cf3cc04a4f0c0f8841189b9dc8ffc2c" ]
[ "integrateyolo.py" ]
[ "import numpy as np\nimport cv2\nimport imutils\nimport time\nfrom sklearn.metrics import pairwise\nfrom imutils.video import FPS\nimport copy\nimport os\nimport sys\nimport tensorflow as tf\nimport pathlib\nfrom collections import defaultdict\n\nfrom utils import tracking_utils \nfrom utils import signalDetection_utils \nfrom utils import estimate_collide_utils\nfrom utils import estimate_stepping_utils\nfrom utils import lane_detection_utils\nfrom utils import break_light_utils\nfrom utils import functions\n\nfrom utils import ops as utils_ops\nfrom utils import label_map_util\n\n\n\n\n\n\n# print(category_index)\ncolors = np.random.uniform(0, 255, size=(100, 3))\nfont = cv2.FONT_HERSHEY_SIMPLEX\nblackLower = (0 , 0 , 0)\nblackUpper = (180 , 255 , 35)\n\n\n\n\n\n\n\n\n\ndef click_and_crop(event, x, y, flags, param):\n global refPt\n # if the left mouse button was clicked, record the starting (x, y) coordinates\n if event == cv2.EVENT_LBUTTONDOWN:\n refPt.append([x, y])\n\n\n\n\n\ndef confirm_day_or_night(frame , flag_night_counter):\n blurred = cv2.GaussianBlur(frame, (11, 11), 0)\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, blackLower , blackUpper)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask , None, iterations=2)\n # cv2.imshow('black',imutils.resize(mask,width=250))\n # cv2.imshow('frame',frame)\n pixel_ct = 0\n pixel_len = 0\n for i in mask:\n pixel_ct = pixel_ct + np.sum(i==0)\n pixel_len = pixel_len + len(i)\n ratio = pixel_ct / pixel_len\n print(\"ratio = \",ratio)\n if ratio < 0.6:\n flag_night_counter = flag_night_counter + 1\n return flag_night_counter\n else:\n flag_night_counter = flag_night_counter - 1 \n return flag_night_counter\n\n\n\nnet = cv2.dnn.readNet(\"../yolov3.weights\", \"../yolov3.cfg\")\nclasses = []\nwith open(\"../coco.names\", \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\nlayer_names = net.getLayerNames()\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n\n\n# flagPerson and areaPerson are pedestrian crash\nflagPerson = 0\nareaPerson = 0\nareaDetails = []\n\n# crash_count_frames are vehicle warning variables\ncrash_count_frames = 0\n\n# signalCounter and flagSignal are traffic signal variables\nsignalCounter = -99999\nflagSignal = [0] * 10\n\n# number and prev_frame are tracking variables\nnumber = 0\nprev_frame = []\n\n# flagLanes are lane variables\nflagLanes = [0] * 20\n\ndef yolo_infer(dashPointer , lanePointer , frame):\n global flagPerson , areaPerson , areaDetails\n global crash_count_frames\n global signalCounter , flagSignal\n global prev_frame , number\n global flagLanes\n\n image_np = np.array(frame)\n lane_image = copy.deepcopy(image_np)\n height,width,channel = image_np.shape\n\n mask = 255*np.ones_like(image_np)\n vertices = np.array(dashPointer, np.int32)\n cv2.fillPoly(mask, [vertices], [0,0,0])\n image_np = cv2.bitwise_and(image_np, mask)\n\n # Detecting objects\n blob = cv2.dnn.blobFromImage(image_np, 0.00392, (416,416), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n confidencesCars , boxesCars = [] , []\n confidencesLights , boxesLights = [] , []\n confidencesPersons , boxesPersons = [] , []\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n if (class_id == 2 or class_id == 5 or class_id == 7):\n\n scr = scores[class_id]\n center_x = detection[0] * width\n center_y = detection[1] * height\n\n w = detection[2] * width\n h = detection[3] * 
height\n xmin = center_x - w / 2\n ymin = center_y - h / 2\n if (w*h >=800):\n confidencesCars.append(float(scr))\n boxesCars.append([int(xmin) , int(ymin) , int(w) , int(h)])\n elif class_id == 0:\n # print(scores)\n\n scr = scores[class_id]\n center_x = detection[0] * width\n center_y = detection[1] * height\n\n w = detection[2] * width\n h = detection[3] * height\n xmin = center_x - w / 2\n ymin = center_y - h / 2\n confidencesPersons.append(float(scr))\n boxesPersons.append([int(xmin) , int(ymin) , int(w) , int(h)])\n\n elif class_id == 9:\n scr = scores[class_id]\n center_x = detection[0] * width\n center_y = detection[1] * height\n\n w = detection[2] * width\n h = detection[3] * height\n xmin = center_x - w / 2\n ymin = center_y - h / 2\n confidencesLights.append(float(scr))\n boxesLights.append([int(xmin) , int(ymin) , int(w) , int(h)])\n\n\n indexesLights = cv2.dnn.NMSBoxes(boxesLights, confidencesLights, 0.5, 0.4)\n indexesCars = cv2.dnn.NMSBoxes(boxesCars, confidencesCars, 0.5, 0.4)\n indexesPersons = cv2.dnn.NMSBoxes(boxesPersons, confidencesPersons, 0.5, 0.4)\n\n image_np , signalCounter , flagSignal = signalDetection_utils.signalDetection(indexesLights , boxesLights , image_np , signalCounter , flagSignal)\n image_np , prev_frame , number = tracking_utils.tracking(indexesCars , boxesCars , image_np , prev_frame , number)\n image_np , crash_count_frames = estimate_collide_utils.estimate_collide(indexesCars , boxesCars , image_np , crash_count_frames)\n image_np , flagPerson , areaPerson , areaDetails = estimate_stepping_utils.estimate_stepping(indexesPersons , boxesPersons , image_np, flagPerson , areaPerson , areaDetails)\n\n cv2.putText(image_np,\"DAY\",(width - 200 ,50), font, 2, (167,133,0) , 2 , cv2.LINE_AA)\n\n image_np , flagLanes = lane_detection_utils.draw_lines(lanePointer , dashPointer , lane_image , image_np , flagLanes)\n\n cv2.imshow(\"finally\", image_np)\n\n\n\n\n\n\n\ndef selectRegions(image , text , flag):\n global refPt\n clone = copy.deepcopy(image)\n while True:\n key = cv2.waitKey(1) & 0xFF\n # display the image and wait for a keypress\n if flag==1:\n cv2.putText(image, text , (240,30), font , 1.2, [0,255,255], 2,cv2.LINE_AA)\n cv2.putText(image, \"Press 'r' key to reset everything.\", (290,70), font , 1.2, [0,255,255], 2,cv2.LINE_AA)\n cv2.putText(image, \"Press 'd' key if the region selection is done.\", (180,110), font , 1.2, [0,255,255], 2,cv2.LINE_AA)\n else:\n cv2.putText(image, text , (240,30), font , 1.2, [0,255,0], 2,cv2.LINE_AA)\n cv2.putText(image, \"Press 'r' key to reset everything.\", (290,70), font , 1.2, [0,255,0], 2,cv2.LINE_AA)\n cv2.putText(image, \"Press 'd' key if the region selection is done.\", (180,110), font , 1.2, [0,255,0], 2,cv2.LINE_AA)\n\n for pt in range(len(refPt)-1):\n pt1 , pt2 = refPt[pt] , refPt[pt+1]\n cv2.line(image, (pt1[0],pt1[1]), (pt2[0],pt2[1]), [0,255,255], 3) \n\n cv2.imshow(\"ROI\", image)\n if key == ord(\"r\"):\n image = copy.deepcopy(clone)\n refPt = []\n elif key == ord(\"d\"):\n if flag == 1:\n return 0\n elif flag == 2 and len(refPt) > 2:\n return 0\n elif key == ord('q'):\n return 1\n\n\n\n\n\n\n\ndef night():\n global refPt\n _ , image = cap.read()\n image=imutils.resize(image, width=1280)\n\n ctt = 0\n\n Quit = selectRegions(image , \"Click points to select your vehicle dash.\" , 1)\n dashPointer = refPt\n if len(dashPointer) <= 2:\n dashPointer = [[0,0], [0,0], [0,0]]\n refPt = []\n print(\"For dash: \",dashPointer)\n if Quit == 1:\n return\n cv2.destroyWindow(\"ROI\")\n\n fps = FPS().start()\n while 
True:\n _,frame = cap.read()\n if _ == False:\n break\n frame=imutils.resize(frame, width=1280)\n\n # print(ctt ,fps._numFrames)\n # ctt = ctt + 1\n break_light_utils.break_light(dashPointer , frame)\n\n # cv2.imshow(\"original\",frame)\n key = cv2.waitKey(1) & 0xFF\n fps.update()\n if key == ord('q'):\n break\n fps.stop()\n print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n\n\n\ndef day():\n global refPt\n _ , image = cap.read()\n image=imutils.resize(image, width=1280)\n\n ctt = 0\n\n Quit = selectRegions(copy.deepcopy(image) , \"Click points to select your vehicle dash.\" , 1)\n dashPointer = refPt\n if len(dashPointer) <= 2:\n dashPointer = [[0,0], [0,0], [0,0]]\n refPt = []\n print(\"For dash: \",dashPointer)\n if Quit == 1:\n return\n\n Quit = selectRegions(copy.deepcopy(image) , \"Click points to select bird's eye view.\" , 2)\n lanePointer = refPt\n print(\"For lanes: \",lanePointer)\n if Quit == 1:\n return\n\n cv2.destroyWindow(\"ROI\")\n\n fps = FPS().start()\n while True:\n _,frame = cap.read()\n if _ == False:\n break\n frame = imutils.resize(frame, width=1280)\n # print(ctt ,fps._numFrames)\n # ctt = ctt + 1\n\n yolo_infer(dashPointer , lanePointer , frame)\n\n # cv2.imshow(\"original\",frame)\n key = cv2.waitKey(1) & 0xFF\n fps.update()\n if key == ord('q'):\n break\n\n fps.stop()\n print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n\n\n\n\nrefPt = [] # to store reference pointers\nflag_night_counter = 0 # counter to count night frames\n\ncap=cv2.VideoCapture('../videos/r.mp4')\nstart_frame = 0*30\ncap.set(1,start_frame)\n_ , image = cap.read()\nimage=imutils.resize(image, width=1280)\ncv2.namedWindow(\"ROI\")\ncv2.setMouseCallback(\"ROI\", click_and_crop)\n\nfor z in range(10):\n (grabbed, frame) = cap.read()\n frame=imutils.resize(frame, width=1280)\n height,width,channel = frame.shape\n\n flag_night_counter = confirm_day_or_night(frame , flag_night_counter)\nprint(\"flag_night_counter = \",flag_night_counter)\ncap.set(1 , start_frame)\n\nif flag_night_counter > 4:\n night()\nelse:\n day()\n\n\n\ncv2.destroyAllWindows()\ncap.release()\n" ]
[ [ "numpy.ones_like", "numpy.argmax", "numpy.random.uniform", "numpy.array", "numpy.sum" ] ]
anslt/efficientdet_mask
[ "7ee75894a3f3c6428484e85b41f1ffedd657f128" ]
[ "maskrcnn_benchmark/modeling/backbone/efficientdet_backbone.py" ]
[ "# Author: Zylo117\n\nimport math\n\nimport torch\nfrom torch import nn\n\nfrom .efficientdet import BiFPN, Regressor, Classifier, EfficientNet\n# from .efficientdet.utils import Anchors\n\n\nclass EfficientDetBackbone(nn.Module):\n def __init__(self, cfg, **kwargs):\n super(EfficientDetBackbone, self).__init__()\n compound_coef = cfg.EFFICIENTNET.COEF\n load_weights = cfg.EFFICIENTNET.LOAD_WEIGHTS\n \n #TODO: fix number of classes\n num_classes = 80\n\n self.compound_coef = compound_coef\n\n self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 6]\n self.fpn_num_filters = [64, 88, 112, 160, 224, 288, 384, 384]\n self.fpn_cell_repeats = [3, 4, 5, 6, 7, 7, 8, 8]\n self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]\n self.box_class_repeats = [3, 3, 3, 4, 4, 4, 5, 5]\n self.anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]\n self.aspect_ratios = kwargs.get('ratios', [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)])\n self.num_scales = len(kwargs.get('scales', [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]))\n conv_channel_coef = {\n # the channels of P3/P4/P5.\n 0: [40, 112, 320],\n 1: [40, 112, 320],\n 2: [48, 120, 352],\n 3: [48, 136, 384],\n 4: [56, 160, 448],\n 5: [64, 176, 512],\n 6: [72, 200, 576],\n 7: [72, 200, 576],\n }\n\n num_anchors = len(self.aspect_ratios) * self.num_scales\n\n self.bifpn = nn.Sequential(\n *[BiFPN(self.fpn_num_filters[self.compound_coef],\n conv_channel_coef[compound_coef],\n True if _ == 0 else False,\n attention=True if compound_coef < 6 else False)\n for _ in range(self.fpn_cell_repeats[compound_coef])])\n\n # self.num_classes = num_classes\n # self.regressor = Regressor(in_channels=self.fpn_num_filters[self.compound_coef], num_anchors=num_anchors, num_layers=self.box_class_repeats[self.compound_coef])\n # self.classifier = Classifier(in_channels=self.fpn_num_filters[self.compound_coef], num_anchors=num_anchors,num_classes=num_classes,num_layers=self.box_class_repeats[self.compound_coef])\n # self.anchors = Anchors(anchor_scale=self.anchor_scale[compound_coef], **kwargs)\n\n self.backbone_net = EfficientNet(self.backbone_compound_coef[compound_coef], load_weights)\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def forward(self, inputs):\n max_size = inputs.shape[-1]\n\n _, p3, p4, p5 = self.backbone_net(inputs)\n\n features = (p3, p4, p5)\n features = self.bifpn(features)\n\n #regression = self.regressor(features)\n #classification = self.classifier(features)\n #anchors = self.anchors(inputs, inputs.dtype)\n\n return features #, regression, classification, anchors\n\n def init_backbone(self, path):\n state_dict = torch.load(path)\n try:\n ret = self.load_state_dict(state_dict, strict=False)\n print(ret)\n except RuntimeError as e:\n print('Ignoring ' + str(e) + '\"')\n" ]
[ [ "torch.load" ] ]
blakete/Wearable-Data-Analysis
[ "05fa449db482f28adccb22b7a423099d84ba253a" ]
[ "training_data_flow_from_directory.py" ]
[ "# Author: Blake Edwards\nimport os\nimport re\nimport sys\n\nimport math\nimport numpy as np\nimport pandas as pd\nfrom sys import exit\nfrom glob import glob\nfrom dateutil import parser\nfrom datetime import datetime\n\nif len(sys.argv) != 2:\n print(\"[INCORRECT USAGE] try $ python3 /path/to/downloaded/dataset>\")\n exit()\ncollected_data_path = sys.argv[1]\n\nnp.set_printoptions(suppress=True)\nimport matplotlib.pyplot as plt;\n\nplt.rcdefaults()\n\nshift = 15\nwindow_size = 45\nmean_diff_threshold = 20000\ntraining_samples = []\ntraining_targets = []\n\nclasses = glob(os.path.join(collected_data_path, \"*\"))\nfor i in range(0, len(classes)):\n classes[i] = re.sub(str(collected_data_path) + str('/'), \"\", classes[i])\nclasses = sorted(classes, key=str.lower)\nprint(classes)\ndustbin_label_loc = classes.index(\"dustbin\")\n\nprocessed = np.zeros(len(classes))\nfailed = np.zeros(len(classes))\nfor i in range(0, len(classes)):\n curr_data_path = os.path.join(collected_data_path, classes[i])\n print(f\"processing class: {classes[i]}\")\n csvs = glob(os.path.join(curr_data_path, \"*.csv\"))\n for k in range(0, len(csvs)):\n print(f\"processing {csvs[k]}\")\n raw_data = pd.read_csv(csvs[k])\n try:\n raw_numpy = raw_data[[\"accelerometerTimestamp_sinceReboot(s)\", \"accelerometerAccelerationX(G)\", \"accelerometerAccelerationY(G)\",\n \"accelerometerAccelerationZ(G)\"]].to_numpy()\n except:\n print(\"[ERROR] Loaded data contains incorrect columnn names\")\n continue\n # todo remove redundant loops\n for j in range(0, len(raw_numpy)):\n raw_numpy[j][0] = raw_numpy[j][0] * 1000\n # print(raw_numpy)\n for j in range(30, len(raw_numpy) - (window_size + 30), 15):\n sample = raw_numpy[j:j + window_size]\n sample_diffs = np.diff(sample[:, 0], n=1, axis=0)\n std = np.std(sample_diffs, axis=0)\n mean = np.mean(sample_diffs, axis=0)\n diff = abs((1000 / 15) - mean)\n idx = classes.index(classes[i])\n if diff < mean_diff_threshold:\n # print(f\"Processing sample with mean: {mean}\")\n processed[idx] += 1\n training_samples.append(sample[:, 1:])\n target = np.zeros(len(classes))\n target[classes.index(classes[i])] = 1\n training_targets.append(target)\n # print(sample_diffs)\n # plt.subplot(211)\n # plt.title(f'{classes[i]} | Milliseconds between samples')\n # plt.plot(np.arange(1, window_size), sample_diffs)\n # plt.ylim([min(sample_diffs)-5, max(sample_diffs)+5])\n # plt.subplot(212)\n # plt.title(\"Accelerometer sample window\")\n # plt.plot(np.arange(1, window_size+1), raw_data['accelerometerAccelerationX(G)'][j:j + window_size], 'r')\n # plt.plot(np.arange(1, window_size + 1), raw_data['accelerometerAccelerationY(G)'][j:j + window_size], 'g')\n # plt.plot(np.arange(1, window_size + 1), raw_data['accelerometerAccelerationZ(G)'][j:j + window_size], 'b')\n # max_y = max(max(raw_data['accelerometerAccelerationX(G)'][j:j + window_size]), max(raw_data['accelerometerAccelerationY(G)'][j:j + window_size]), max(raw_data['accelerometerAccelerationZ(G)'][j:j + window_size]))\n # min_y = min(min(raw_data['accelerometerAccelerationX(G)'][j:j + window_size]), min(raw_data['accelerometerAccelerationY(G)'][j:j + window_size]), min(raw_data['accelerometerAccelerationZ(G)'][j:j + window_size]))\n # plt.ylim([min_y-2, max_y+2])\n # plt.show()\n # plt.savefig(f'samples/{classes[i]}_{processed[idx]}.png')\n # plt.subplot(212).cla()\n # plt.subplot(211).cla()\n else:\n print(f\"Dropping sample with mean: {mean}\")\n failed[idx] += 1\n # print(sample_diffs)\n # plt.subplot(211)\n # plt.title(f'{classes[i]} | 
Milliseconds between samples')\n # plt.plot(np.arange(1, window_size), sample_diffs)\n # plt.ylim([min(sample_diffs)-100, max(sample_diffs)+100])\n # plt.subplot(212)\n # plt.title(\"Accelerometer sample window\")\n # plt.plot(np.arange(1, window_size+1), raw_data['accelerometerAccelerationX(G)'][j:j + window_size], 'r')\n # plt.plot(np.arange(1, window_size + 1), raw_data['accelerometerAccelerationY(G)'][j:j + window_size], 'g')\n # plt.plot(np.arange(1, window_size + 1), raw_data['accelerometerAccelerationZ(G)'][j:j + window_size], 'b')\n # plt.ylim([-4.75, 4.75])\n # plt.show()\n\n# saving training data\ntraining_samples = np.asarray(training_samples)\ntraining_targets = np.asarray(training_targets)\n\n# create set of dustbin samples\nnum_dustbin_samples = 1000\ndustbin_samples = np.random.uniform(-3, 3, size=(num_dustbin_samples, 45, 3))\n# dustbin_samples = np.concatenate((dustbin_samples, np.random.uniform(-2, 2, size=(num_dustbin_samples, 45, 3))))\n# dustbin_samples = np.concatenate((dustbin_samples, np.random.uniform(-1, 1, size=(num_dustbin_samples, 45, 3))))\ndustbin_labels = np.zeros((len(dustbin_samples), len(classes)))\ndustbin_labels[:, dustbin_label_loc] = 1\n# print(dustbin_labels)\n\n# generate random noise for dust bin class\ntrain_x = np.concatenate((training_samples, dustbin_samples))\ntrain_y = np.concatenate((training_targets, dustbin_labels))\n\nnp.save(\"training_samples\", np.asarray(train_x))\nnp.save(\"training_targets\", np.asarray(train_y))\n\nprint(f'\\nClasses: {classes}')\nprint(f'Successful: {processed}')\nprint(f'Failed: {failed}')\n\nprint(f'\\nSuccesfully processed {(sum(processed)) / float((sum(failed) + sum(processed))) * 100}% of the dataset')\nprint(f'Total samples: {sum(failed) + sum(processed)}')\nprint(f'Failed samples: {sum(failed)}')\nprint(f'Successful samples: {sum(processed)}')\n\nprint(f'\\nClasses: {classes} \\nSamples: {np.sum(train_y, axis=0)}')\n" ]
[ [ "pandas.read_csv", "numpy.asarray", "numpy.set_printoptions", "numpy.concatenate", "numpy.std", "numpy.diff", "numpy.mean", "numpy.random.uniform", "matplotlib.pyplot.rcdefaults", "numpy.sum" ] ]
mcfrank/dmww
[ "cdacac49c4574f6d1cd48c4c8a4914432c26d9fb" ]
[ "sampling_helper.py" ]
[ "import numpy as np\nfrom random import *\nfrom scipy.special import gammaln\nfrom scipy import stats\n\n########################################################################\n## PROPOSAL DISTRIBUTIONS FOR HYPERPARAMETER INFERENCE\n\n# proposal function for variables that can't go below zero\ndef alter_0inf_var(ob):\n nb = ob\n\n if random() > .5:\n nb = ob + np.exp(gauss(0, 2))\n else:\n nb = ob - np.exp(gauss(0, 2))\n\n # can't go below zero\n if nb < 0:\n nb = ob\n\n return nb\n\n# proposal function for hyperparameter variables that are 0-1 bounded\ndef alter_01_var(ob):\n nb = ob\n\n nb = ob + inv_logit(gauss(-6, 2))\n\n # can't go below zero or above one\n if nb <= 0 or nb >= 1:\n nb = ob\n\n return nb\n\n# inverse logistic function\ndef inv_logit(x):\n y = np.exp(x) / (1 + np.exp(x))\n\n return y\n\n########################################################################\n## FUNCTIONS FOR DIRICHLET-MULTINOMIAL UPDATES\n\n# dirichlet multinomial conjugate posterior\ndef score_dm(c, alpha):\n k = len(c)\n n = sum(c)\n ls = sum(gammaln(np.add(c, alpha / k))) - \\\n gammaln((alpha / k)) * k + \\\n gammaln(alpha) - \\\n gammaln(n + alpha)\n\n return ls\n\n\n# DM conjugate prior update based on changing count by -1\ndef update_dm_minus(old_score, c, a, i):\n k = len(c)\n n = sum(c)\n new_score = old_score - np.log(c[i] + a / k) + np.log(n + a)\n return new_score\n\n\n# DM conjugate prior update based on changing count by 1\ndef update_dm_plus(old_score, c, a, i):\n k = len(c)\n n = sum(c)\n new_score = old_score - np.log(n + a - 1) + np.log(c[i] + a / k - 1)\n return new_score\n\n########################################################################\n## MISC\n\ndef nans(desired_shape, dtype=float):\n a = np.empty(desired_shape, dtype)\n a.fill(np.nan)\n return a\n\ndef neg_infs(desired_shape, dtype=float):\n a = np.empty(desired_shape, dtype)\n a.fill(-np.inf)\n return a" ]
[ [ "numpy.log", "scipy.special.gammaln", "numpy.add", "numpy.exp", "numpy.empty" ] ]
modelhub-ai/modelhub-engine
[ "81e893fb7669ee9912178346efbf828dd8c0410b" ]
[ "framework/modelhubapi_tests/mockmodels/contrib_src_si/inference.py" ]
[ "\"\"\"\nImplementation of several mock models to test the API. Each model has a\nslightly different behaviour, which should be properly handled by the API.\nAlso most models are not fully valid, e.g. they do not comply to the mock\nconfig. This is ok for unit testing, most models are only used for a small\nset of specific tests requiring that model's specific behavioural aspect.\n\nThese models test based on the single input mock model (contrib_src_si) with\nmultiple outputs.\n\"\"\"\n\nimport os\nimport numpy as np\nfrom modelhublib.model import ModelBase\n\n\nclass Model(ModelBase):\n\n def __init__(self):\n pass\n\n def infer(self, input):\n if os.path.isfile(input):\n # Return a constant classification result no matter what's the input\n label_list = [{\"label\": \"class_0\", 'probability': 0.3},\n {\"label\": \"class_1\", 'probability': 0.7}]\n mask = np.asarray([[0,1,1,0],[0,2,2,0]])\n return [label_list, mask]\n else:\n raise IOError(\"File \" + input + \" does not exist.\")\n\n\nclass ModelReturnsOneNumpyArray(ModelBase):\n\n def __init__(self):\n pass\n\n def infer(self, input):\n if os.path.isfile(input):\n # Return a constant classification result no matter what's the input\n mask = np.asarray([[0,1,1,0],[0,2,2,0]])\n return mask\n else:\n raise IOError(\"File \" + input + \" does not exist.\")\n\n\nclass ModelReturnsListOfOneNumpyArray(ModelReturnsOneNumpyArray):\n\n def infer(self, input):\n return [super(ModelReturnsListOfOneNumpyArray, self).infer(input)]\n\n\nclass ModelReturnsOneLabelList(ModelBase):\n\n def __init__(self):\n pass\n\n def infer(self, input):\n if os.path.isfile(input):\n # Return a constant classification result no matter what's the input\n label_list = [{\"label\": \"class_0\", \"probability\": 0.3},\n {\"label\": \"class_1\", \"probability\": 0.7}]\n return label_list\n else:\n raise IOError(\"File \" + input + \" does not exist.\")\n\n\nclass ModelReturnsListOfOneLabelList(ModelReturnsOneLabelList):\n\n def infer(self, input):\n return [super(ModelReturnsListOfOneLabelList, self).infer(input)]\n\n\nclass ModelThrowingError(ModelBase):\n\n def __init__(self):\n pass\n\n def infer(self, input):\n raise NotImplementedError\n" ]
[ [ "numpy.asarray" ] ]
LielinJiang/PaddleSeg
[ "7c4d39da3d0ff635cac066aeb61e23dfada5d0d7" ]
[ "slim/nas/eval_nas.py" ]
[ "# coding: utf8\n# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n# GPU memory garbage collection optimization flags\nos.environ['FLAGS_eager_delete_tensor_gb'] = \"0.0\"\n\nimport sys\n\nLOCAL_PATH = os.path.dirname(os.path.abspath(__file__))\nSEG_PATH = os.path.join(LOCAL_PATH, \"../../\", \"pdseg\")\nsys.path.append(SEG_PATH)\n\nimport time\nimport argparse\nimport functools\nimport pprint\nimport cv2\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\n\nfrom utils.config import cfg\nfrom utils.timer import Timer, calculate_eta\nfrom model_builder import build_model\nfrom model_builder import ModelPhase\nfrom reader import SegDataset\nfrom metrics import ConfusionMatrix\n\nfrom mobilenetv2_search_space import MobileNetV2SpaceSeg\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PaddleSeg model evalution')\n parser.add_argument(\n '--cfg',\n dest='cfg_file',\n help='Config file for training (and optionally testing)',\n default=None,\n type=str)\n parser.add_argument(\n '--use_gpu',\n dest='use_gpu',\n help='Use gpu or cpu',\n action='store_true',\n default=False)\n parser.add_argument(\n '--use_mpio',\n dest='use_mpio',\n help='Use multiprocess IO or not',\n action='store_true',\n default=False)\n parser.add_argument(\n 'opts',\n help='See utils/config.py for all options',\n default=None,\n nargs=argparse.REMAINDER)\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\ndef evaluate(cfg, ckpt_dir=None, use_gpu=False, use_mpio=False, **kwargs):\n np.set_printoptions(precision=5, suppress=True)\n\n startup_prog = fluid.Program()\n test_prog = fluid.Program()\n dataset = SegDataset(\n file_list=cfg.DATASET.VAL_FILE_LIST,\n mode=ModelPhase.EVAL,\n data_dir=cfg.DATASET.DATA_DIR)\n\n def data_generator():\n #TODO: check is batch reader compatitable with Windows\n if use_mpio:\n data_gen = dataset.multiprocess_generator(\n num_processes=cfg.DATALOADER.NUM_WORKERS,\n max_queue_size=cfg.DATALOADER.BUF_SIZE)\n else:\n data_gen = dataset.generator()\n\n for b in data_gen:\n yield b[0], b[1], b[2]\n\n py_reader, avg_loss, pred, grts, masks = build_model(\n test_prog, startup_prog, phase=ModelPhase.EVAL, arch=kwargs['arch'])\n\n py_reader.decorate_sample_generator(\n data_generator, drop_last=False, batch_size=cfg.BATCH_SIZE)\n\n # Get device environment\n places = fluid.cuda_places() if use_gpu else fluid.cpu_places()\n place = places[0]\n dev_count = len(places)\n print(\"#Device count: {}\".format(dev_count))\n\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n\n test_prog = test_prog.clone(for_test=True)\n\n ckpt_dir = cfg.TEST.TEST_MODEL if not ckpt_dir else ckpt_dir\n\n if not os.path.exists(ckpt_dir):\n raise ValueError('The TEST.TEST_MODEL {} is not found'.format(ckpt_dir))\n\n if ckpt_dir is not None:\n print('load test 
model:', ckpt_dir)\n fluid.io.load_params(exe, ckpt_dir, main_program=test_prog)\n\n # Use streaming confusion matrix to calculate mean_iou\n np.set_printoptions(\n precision=4, suppress=True, linewidth=160, floatmode=\"fixed\")\n conf_mat = ConfusionMatrix(cfg.DATASET.NUM_CLASSES, streaming=True)\n fetch_list = [avg_loss.name, pred.name, grts.name, masks.name]\n num_images = 0\n step = 0\n all_step = cfg.DATASET.TEST_TOTAL_IMAGES // cfg.BATCH_SIZE + 1\n timer = Timer()\n timer.start()\n py_reader.start()\n while True:\n try:\n step += 1\n loss, pred, grts, masks = exe.run(\n test_prog, fetch_list=fetch_list, return_numpy=True)\n\n loss = np.mean(np.array(loss))\n\n num_images += pred.shape[0]\n conf_mat.calculate(pred, grts, masks)\n _, iou = conf_mat.mean_iou()\n _, acc = conf_mat.accuracy()\n\n speed = 1.0 / timer.elapsed_time()\n\n print(\n \"[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} step/sec={:.2f} | ETA {}\"\n .format(step, loss, acc, iou, speed,\n calculate_eta(all_step - step, speed)))\n timer.restart()\n sys.stdout.flush()\n except fluid.core.EOFException:\n break\n\n category_iou, avg_iou = conf_mat.mean_iou()\n category_acc, avg_acc = conf_mat.accuracy()\n print(\"[EVAL]#image={} acc={:.4f} IoU={:.4f}\".format(\n num_images, avg_acc, avg_iou))\n print(\"[EVAL]Category IoU:\", category_iou)\n print(\"[EVAL]Category Acc:\", category_acc)\n print(\"[EVAL]Kappa:{:.4f}\".format(conf_mat.kappa()))\n\n return category_iou, avg_iou, category_acc, avg_acc\n\n\ndef main():\n args = parse_args()\n if args.cfg_file is not None:\n cfg.update_from_file(args.cfg_file)\n if args.opts:\n cfg.update_from_list(args.opts)\n cfg.check_and_infer()\n print(pprint.pformat(cfg))\n evaluate(cfg, **args.__dict__)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.set_printoptions", "numpy.array" ] ]
dhruvbatra/habitat-api
[ "00a0d51c714c3cc64e0214390daeff9ab390f289" ]
[ "test/test_mp3d_eqa.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport numpy as np\nimport pytest\n\nimport habitat\nimport habitat.datasets.eqa.mp3d_eqa_dataset as mp3d_dataset\nfrom habitat.config.default import get_config\nfrom habitat.core.embodied_task import Episode\nfrom habitat.core.logging import logger\nfrom habitat.datasets import make_dataset\n\nCFG_TEST = \"test/habitat_mp3d_eqa_test.yaml\"\nCLOSE_STEP_THRESHOLD = 0.028\n\n# List of episodes each from unique house\nTEST_EPISODE_SET = [1, 309, 807, 958, 696, 10, 297, 1021, 1307, 1569]\n\nRGB_EPISODE_MEANS = {\n 1: 123.1576333222566,\n 10: 123.86094605688947,\n 297: 122.69351220853402,\n 309: 118.95794969775298,\n 696: 115.71903709129052,\n 807: 143.7834237211494,\n 958: 141.97871610030387,\n 1021: 119.1051016229882,\n 1307: 102.11408987112925,\n 1569: 91.01973929495183,\n}\n\nEPISODES_LIMIT = 6\n\n\ndef get_minos_for_sim_eqa_config():\n _sim_eqa_c = get_config(CFG_TEST)\n _sim_eqa_c.task_name = \"EQA-v0\"\n _sim_eqa_c.dataset = mp3d_dataset.get_default_mp3d_v1_config()\n _sim_eqa_c.dataset.split = \"val\"\n _sim_eqa_c.scene = \"data/scene_datasets/mp3d/17DRP5sb8fy/17DRP5sb8fy.glb\"\n _sim_eqa_c.height = 512\n _sim_eqa_c.width = 512\n _sim_eqa_c.hfov = \"45\"\n _sim_eqa_c.vfov = \"45\"\n _sim_eqa_c.sensor_position = [0, 1.09, 0]\n _sim_eqa_c.forward_step_size = 0.1 # in metres\n _sim_eqa_c.turn_angle = 9 # in degrees\n _sim_eqa_c.sim = \"Sim-v0\"\n\n # Agent configuration\n agent_c = _sim_eqa_c.agents[0]\n agent_c.height = 1.5\n agent_c.radius = 0.1\n agent_c.mass = 32.0\n agent_c.linear_acceleration = 10.0\n agent_c.angular_acceleration = 5 * 3.14\n agent_c.linear_friction = 1.0\n agent_c.angular_friction = 1.0\n agent_c.coefficient_of_restitution = 0.15707963267\n\n return _sim_eqa_c\n\n\ndef check_json_serializaiton(dataset: habitat.Dataset):\n start_time = time.time()\n json_str = str(dataset.to_json())\n logger.info(\n \"JSON conversion finished. 
{} sec\".format((time.time() - start_time))\n )\n decoded_dataset = dataset.__class__()\n decoded_dataset.from_json(json_str)\n assert len(decoded_dataset.episodes) > 0\n episode = decoded_dataset.episodes[0]\n assert isinstance(episode, Episode)\n assert (\n decoded_dataset.to_json() == json_str\n ), \"JSON dataset encoding/decoding isn't consistent\"\n\n\ndef test_mp3d_eqa_dataset():\n dataset_config = get_config(CFG_TEST).DATASET\n if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(\n dataset_config\n ):\n pytest.skip(\n \"Please download Matterport3D EQA dataset to \" \"data folder.\"\n )\n\n dataset = mp3d_dataset.Matterport3dDatasetV1(config=dataset_config)\n assert dataset\n assert (\n len(dataset.episodes) == mp3d_dataset.EQA_MP3D_V1_VAL_EPISODE_COUNT\n ), \"Test split episode number mismatch\"\n check_json_serializaiton(dataset)\n\n\ndef test_mp3d_eqa_sim():\n eqa_config = get_config(CFG_TEST)\n\n if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(\n eqa_config.DATASET\n ):\n pytest.skip(\n \"Please download Matterport3D EQA dataset to \" \"data folder.\"\n )\n\n dataset = make_dataset(\n id_dataset=eqa_config.DATASET.TYPE, config=eqa_config.DATASET\n )\n env = habitat.Env(config=eqa_config, dataset=dataset)\n env.episodes = dataset.episodes[:EPISODES_LIMIT]\n\n assert env\n env.reset()\n while not env.episode_over:\n action = env.action_space.sample()\n assert env.action_space.contains(action)\n obs = env.step(action)\n if not env.episode_over:\n assert \"rgb\" in obs, \"RGB image is missing in observation.\"\n assert obs[\"rgb\"].shape[:2] == (\n eqa_config.SIMULATOR.RGB_SENSOR.HEIGHT,\n eqa_config.SIMULATOR.RGB_SENSOR.WIDTH,\n ), (\n \"Observation resolution {} doesn't correspond to config \"\n \"({}, {}).\".format(\n obs[\"rgb\"].shape[:2],\n eqa_config.SIMULATOR.RGB_SENSOR.HEIGHT,\n eqa_config.SIMULATOR.RGB_SENSOR.WIDTH,\n )\n )\n\n env.close()\n\n\ndef test_mp3d_eqa_sim_correspondence():\n eqa_config = get_config(CFG_TEST)\n\n if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(\n eqa_config.DATASET\n ):\n pytest.skip(\n \"Please download Matterport3D EQA dataset to \" \"data folder.\"\n )\n\n dataset = make_dataset(\n id_dataset=eqa_config.DATASET.TYPE, config=eqa_config.DATASET\n )\n env = habitat.Env(config=eqa_config, dataset=dataset)\n env.episodes = [\n episode\n for episode in dataset.episodes\n if int(episode.episode_id) in TEST_EPISODE_SET[:EPISODES_LIMIT]\n ]\n\n ep_i = 0\n cycles_n = 2\n while cycles_n > 0:\n env.reset()\n episode = env.current_episode\n assert (\n len(episode.goals) == 1\n ), \"Episode has no goals or more than one.\"\n assert (\n len(episode.shortest_paths) == 1\n ), \"Episode has no shortest paths or more than one.\"\n start_state = env.sim.get_agent_state()\n assert np.allclose(\n start_state.position, episode.start_position\n ), \"Agent's start position diverges from the shortest path's one.\"\n\n rgb_mean = 0\n logger.info(\n \"{id} {question}\\n{answer}\".format(\n id=episode.episode_id,\n question=episode.question.question_text,\n answer=episode.question.answer_text,\n )\n )\n\n for step_id, point in enumerate(episode.shortest_paths[0]):\n cur_state = env.sim.get_agent_state()\n\n logger.info(\n \"diff position: {} diff rotation: {} \"\n \"cur_state.position: {} shortest_path.position: {} \"\n \"cur_state.rotation: {} shortest_path.rotation: {} action: {}\"\n \"\".format(\n cur_state.position - point.position,\n cur_state.rotation - point.rotation,\n cur_state.position,\n point.position,\n 
cur_state.rotation,\n point.rotation,\n point.action,\n )\n )\n\n assert np.allclose(\n [cur_state.position[0], cur_state.position[2]],\n [point.position[0], point.position[2]],\n atol=CLOSE_STEP_THRESHOLD * (step_id + 1),\n ), \"Agent's path diverges from the shortest path.\"\n\n obs = env.step(point.action)\n\n if not env.episode_over:\n rgb_mean += obs[\"rgb\"][:, :, :3].mean()\n\n if ep_i < len(RGB_EPISODE_MEANS):\n rgb_mean = rgb_mean / len(episode.shortest_paths[0])\n assert np.isclose(\n RGB_EPISODE_MEANS[int(episode.episode_id)], rgb_mean\n ), \"RGB output doesn't match the ground truth.\"\n\n ep_i = (ep_i + 1) % EPISODES_LIMIT\n if ep_i == 0:\n cycles_n -= 1\n\n env.close()\n" ]
[ [ "numpy.allclose" ] ]
Business-Wizard/License_Plate_Reader_App
[ "968217ffaa74ba636ea83f54ec9825567ca8699c" ]
[ "src/visualization/visualize.py" ]
[ "import matplotlib.pyplot as plt\nfrom tensorflow.keras.models import load_model\nfrom src.models.train_model import visualize_history\nfrom src.data.imageprocess import (pipeline_single, read_image,\n grayscale, threshold_image, dilate_image,\n blur_image, sample_image)\nfrom src.models.segmentation import detect_contours, segment_image\n\n\ndef visualize_image_process(grouped=True):\n img = read_image(sample_image)\n grayed = grayscale(img)\n threshed = threshold_image(grayed)\n dilated = dilate_image(threshed, ksize=(5, 5), iters=1)\n blurred = blur_image(dilated, ksize=(3, 3), div=15, gauss=True)\n segmented = detect_contours(blurred)[0]\n visuals_lst = [img, grayed, threshed, dilated, blurred, segmented]\n\n if grouped:\n fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(11, 6), dpi=200)\n for idx, ax in enumerate(ax.flatten()):\n if idx == 0:\n ax.imshow(visuals_lst[idx])\n else:\n ax.imshow(visuals_lst[idx], cmap='gray')\n ax.set_xticks([], [])\n ax.set_yticks([], [])\n plt.tight_layout()\n plt.show()\n else:\n names_lst = [\"Unprocessed\", \"Grayscale\", \"Threshold\", \"Erode\", \"Blur\",\n \"Contour Detection\"]\n for plot, name in zip(visuals_lst, names_lst):\n fig, ax = plt.subplots(figsize=(8, 2), dpi=200)\n ax.set_xticks([], [])\n ax.set_yticks([], [])\n plt.imshow(plot, cmap='gray')\n plt.title(name)\n plt.tight_layout()\n plt.show()\n # plt.savefig(\"./images/\" + name + \".png\")\n\n\ndef visualize_segmentation(image=sample_image):\n img = pipeline_single(sample_image, dilatekernel=(3, 3),\n blurkernel=(5, 5), div=25, gauss=True)\n chars_lst = segment_image(img)\n fig2, ax2 = plt.subplots(nrows=1, ncols=7, figsize=(8, 2), dpi=200)\n for idx, ax in enumerate(ax2.flatten()):\n if idx == 0:\n ax.imshow(chars_lst[idx], cmap='gray')\n elif idx > len(chars_lst)-1:\n break\n else:\n ax.set_xticks([], [])\n ax.set_yticks([], [])\n ax.imshow(chars_lst[idx], cmap='gray')\n plt.tight_layout()\n # plt.savefig(\"./images/image_segments.png\")\n plt.show()\n\n\nif __name__ == '__main__':\n # visualize_image_process(grouped=False)\n # visualize_segmentation()\n model = load_model(\"./models/model_full\")\n visualize_history(model)\n" ]
[ [ "tensorflow.keras.models.load_model", "matplotlib.pyplot.imshow", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ] ]
eltonlanders/ga-learner-dsmp-repo
[ "750bb4dc8ea0cba8d0411e1e8d80aa036f7eaa38" ]
[ "Loan-Defaulters/code.py" ]
[ "# --------------\n#Importing header files\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\n# Code starts here\ndata=pd.read_csv(path)\n\nX=data.drop(['customer.id','paid.back.loan'],axis=1)\n\ny=data['paid.back.loan']\n\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)\n\n\n# Code ends here\n\n\n# --------------\n#Importing header files\nimport matplotlib.pyplot as plt\n\n# Code starts here\nfully_paid=y_train.value_counts()\n\nplt.bar(x=fully_paid.index,height=25)\n\n\n# Code ends here\n\n\n# --------------\n#Importing header files\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n\n#Code starts here\n\n#Removing the last character from the values in column\nX_train['int.rate'] = X_train['int.rate'].map(lambda x: str(x)[:-1])\n\n#Dividing the column values by 100\nX_train['int.rate']=X_train['int.rate'].astype(float)/100\n\n#Removing the last character from the values in column\nX_test['int.rate'] = X_test['int.rate'].map(lambda x: str(x)[:-1])\n\n#Dividing the column values by 100\nX_test['int.rate']=X_test['int.rate'].astype(float)/100\n\n#Storing all the numerical type columns in 'num_df'\nnum_df=X_train.select_dtypes(include=['number']).copy()\n\n#Storing all the categorical type columns in 'cat_df'\ncat_df=X_train.select_dtypes(include=['object']).copy()\n\n#Code ends here\n\n\n\n# --------------\n#Importing header files\nimport seaborn as sns\n\n\n# Code starts here\ncols=list(num_df.columns)\n\nfig,axes=plt.subplots(9,1, figsize=(10,20))\n\nfor i in range(9):\n sns.boxplot(x=y_train,y=num_df[cols[i]],ax=axes[i])\n# Code ends here\n\n\n# --------------\n# Code starts here\ncols=list(cat_df.columns)\n\nfig,axes=plt.subplots(2,2,figsize=(10,20))\n\nfor i,j in range(2,2):\n sns.countplot(x=X_train[cols[i*2+j]],hue=y_train,ax=axes[i,j])\n\n\n\n\n\n# Code ends here\n\n\n# --------------\n#Importing header files\nfrom sklearn.tree import DecisionTreeClassifier\n\n#Code starts here\n\n#Looping through categorical columns\nfor col in cat_df.columns:\n \n #Filling null values with 'NA'\n X_train[col].fillna('NA',inplace=True)\n \n #Initalising a label encoder object\n le=LabelEncoder()\n \n #Fitting and transforming the column in X_train with 'le'\n X_train[col]=le.fit_transform(X_train[col]) \n \n #Filling null values with 'NA'\n X_test[col].fillna('NA',inplace=True)\n \n #Fitting the column in X_test with 'le'\n X_test[col]=le.transform(X_test[col]) \n\n# Replacing the values of y_train\ny_train.replace({'No':0,'Yes':1},inplace=True)\n\n# Replacing the values of y_test\ny_test.replace({'No':0,'Yes':1},inplace=True)\n\n#Initialising 'Decision Tree' model \nmodel=DecisionTreeClassifier(random_state=0)\n\n#Training the 'Decision Tree' model\nmodel.fit(X_train, y_train)\n\n#Finding the accuracy of 'Decision Tree' model\nacc=model.score(X_test, y_test)\n\n#Printing the accuracy\nprint(acc)\n\n#Code ends here\n\n\n# --------------\n#Importing header files\nfrom sklearn.model_selection import GridSearchCV\n\n#Parameter grid\nparameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}\n\n# Code starts here\nmodel_2=DecisionTreeClassifier(random_state=0)\n\np_tree=GridSearchCV(estimator=model_2,param_grid=parameter_grid,cv=5)\n\np_tree.fit(X_train,y_train)\n\nacc_2=p_tree.score(X_test,y_test)\n# Code ends here\n\n\n# --------------\n#Importing header files\n\nfrom io import StringIO\nfrom sklearn.tree import export_graphviz\nfrom sklearn import tree\nfrom sklearn import metrics\nfrom 
IPython.display import Image\nimport pydotplus\n\n# Code starts here\ndot_data=export_graphviz(decision_tree=p_tree.best_estimator_,out_file=None,feature_names=X.columns,filled=True,class_names=['loan_paid_back_yes','loan_paid_back_no'])\n\ngraph_big=pydotplus.graph_from_dot_data(dot_data)\n\n\n\n\n# show graph - do not delete/modify the code below this line\nimg_path = user_data_dir+'/file.png'\ngraph_big.write_png(img_path)\n\nplt.figure(figsize=(20,15))\nplt.imshow(plt.imread(img_path))\nplt.axis('off')\nplt.show() \n\n# Code ends here\n\n\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.tree.export_graphviz", "numpy.arange", "matplotlib.pyplot.imread", "matplotlib.pyplot.subplots", "sklearn.model_selection.train_test_split", "sklearn.preprocessing.LabelEncoder", "sklearn.tree.DecisionTreeClassifier", "matplotlib.pyplot.bar", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
wenlihaoyu/text-detection-ctpn
[ "1b934c07aa8a3774d38883af83532ec6cf2c7f8c" ]
[ "lib/rpn_msr/generate_anchors.py" ]
[ "# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\n\nimport numpy as np\n\n# Verify that we compute the same anchors as Shaoqing's matlab implementation:\n#\n# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat\n# >> anchors\n#\n# anchors =\n#\n# -83 -39 100 56\n# -175 -87 192 104\n# -359 -183 376 200\n# -55 -55 72 72\n# -119 -119 136 136\n# -247 -247 264 264\n# -35 -79 52 96\n# -79 -167 96 184\n# -167 -343 184 360\n\n#array([[ -83., -39., 100., 56.],\n# [-175., -87., 192., 104.],\n# [-359., -183., 376., 200.],\n# [ -55., -55., 72., 72.],\n# [-119., -119., 136., 136.],\n# [-247., -247., 264., 264.],\n# [ -35., -79., 52., 96.],\n# [ -79., -167., 96., 184.],\n# [-167., -343., 184., 360.]])\ndef generate_basic_anchors(sizes, base_size=16):\n base_anchor = np.array([0, 0, base_size - 1, base_size - 1], np.int32)\n anchors = np.zeros((len(sizes), 4), np.int32)\n index = 0\n for h, w in sizes:\n anchors[index] = scale_anchor(base_anchor, h, w)\n index += 1\n return anchors\n\n\ndef scale_anchor(anchor, h, w):\n x_ctr = (anchor[0] + anchor[2]) * 0.5\n y_ctr = (anchor[1] + anchor[3]) * 0.5\n scaled_anchor = anchor.copy()\n scaled_anchor[0] = x_ctr - w / 2 # xmin\n scaled_anchor[2] = x_ctr + w / 2 # xmax\n scaled_anchor[1] = y_ctr - h / 2 # ymin\n scaled_anchor[3] = y_ctr + h / 2 # ymax\n return scaled_anchor\n\n\ndef generate_anchors(base_size=16, ratios=[0.5, 1, 2],\n scales=2**np.arange(3, 6)):\n heights = [11, 16, 23, 33, 48, 68, 97, 139, 198, 283]\n widths = [16]\n sizes = []\n for h in heights:\n for w in widths:\n sizes.append((h, w))\n return generate_basic_anchors(sizes)\n\n'''\ndef generate_anchors(base_size=16, ratios=[0.5, 1, 2],\n scales=2**np.arange(3, 6)):\n \"\"\"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n \"\"\"\n\n base_anchor = np.array([1, 1, base_size, base_size]) - 1\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)\n for i in xrange(ratio_anchors.shape[0])])\n return anchors\n\ndef _whctrs(anchor):\n \"\"\"\n Return width, height, x center, and y center for an anchor (window).\n \"\"\"\n\n w = anchor[2] - anchor[0] + 1\n h = anchor[3] - anchor[1] + 1\n x_ctr = anchor[0] + 0.5 * (w - 1)\n y_ctr = anchor[1] + 0.5 * (h - 1)\n return w, h, x_ctr, y_ctr\n\ndef _mkanchors(ws, hs, x_ctr, y_ctr):\n \"\"\"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n \"\"\"\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors\n\ndef _ratio_enum(anchor, ratios):\n \"\"\"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n size = w * h\n size_ratios = size / ratios\n ws = np.round(np.sqrt(size_ratios))\n hs = np.round(ws * ratios)\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n\ndef _scale_enum(anchor, scales):\n \"\"\"\n Enumerate a set of anchors for each scale wrt an anchor.\n \"\"\"\n\n w, h, x_ctr, y_ctr = _whctrs(anchor)\n ws = w * scales\n hs = h * scales\n anchors = _mkanchors(ws, hs, x_ctr, y_ctr)\n return anchors\n'''\nif 
__name__ == '__main__':\n import time\n t = time.time()\n a = generate_anchors()\n print(time.time() - t)\n print(a)\n from IPython import embed; embed()\n" ]
[ [ "numpy.arange", "numpy.array" ] ]
andreped/H2G-Net
[ "02ad99e404f547517040c44a637f0170b35b705d" ]
[ "src/architectures/UNet.py" ]
[ "from tensorflow.python.keras.layers import Input, Dense, Convolution2D, MaxPooling2D, Dropout, Flatten, SpatialDropout2D, \\\n ZeroPadding2D, Activation, AveragePooling2D, UpSampling2D, BatchNormalization, ConvLSTM2D, \\\n TimeDistributed, Concatenate, Lambda, Reshape, UpSampling3D, Convolution3D, MaxPooling3D, SpatialDropout3D\nfrom tensorflow.python.keras.models import Model\nimport tensorflow as tf\nimport numpy as np\n\n\ndef soft_threshold(x, threshold, name=None):\n # https://www.tensorflow.org/probability/api_docs/python/tfp/math/soft_threshold\n with tf.name_scope(name or 'soft_threshold'):\n x = tf.convert_to_tensor(x, name='x')\n threshold = tf.convert_to_tensor(threshold, dtype=x.dtype, name='threshold')\n return tf.sign(x) * tf.maximum(tf.abs(x) - threshold, 0.)\n\n\ndef convolution_block_2d(x, nr_of_convolutions, use_bn=False, spatial_dropout=None, renorm=False):\n for i in range(2):\n x = Convolution2D(nr_of_convolutions, 3, padding='same')(x)\n if use_bn:\n x = BatchNormalization(renorm=renorm)(x)\n #x = GroupNormalization(groups=8)(x)\n x = Activation('relu')(x)\n if spatial_dropout:\n x = SpatialDropout2D(spatial_dropout)(x)\n\n return x\n\n\ndef encoder_block_2d(x, nr_of_convolutions, use_bn=False, spatial_dropout=None, renorm=False):\n\n x_before_downsampling = convolution_block_2d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n x = MaxPooling2D((2, 2))(x_before_downsampling)\n\n return x, x_before_downsampling\n\n\ndef decoder_block_2d(x, nr_of_convolutions, cross_over_connection=None, use_bn=False, spatial_dropout=None, renorm=False):\n\n x = UpSampling2D((2, 2))(x)\n if cross_over_connection is not None:\n x = Concatenate()([cross_over_connection, x])\n x = convolution_block_2d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n\n return x\n\ndef convolution_block_3d(x, nr_of_convolutions, use_bn=False, spatial_dropout=None, renorm=False):\n for i in range(2):\n x = Convolution3D(nr_of_convolutions, 3, padding='same')(x)\n if use_bn:\n x = BatchNormalization(renorm=renorm)(x)\n #x = GroupNormalization(groups=8)(x)\n x = Activation('relu')(x)\n if spatial_dropout:\n x = SpatialDropout3D(spatial_dropout)(x)\n\n return x\n\n\ndef encoder_block_3d(x, nr_of_convolutions, use_bn=False, spatial_dropout=None, renorm=False):\n\n x_before_downsampling = convolution_block_3d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n downsample = [2, 2, 2]\n for i in range(1, 4):\n if x.shape[i] <= 4:\n downsample[i-1] = 1\n\n x = MaxPooling3D(downsample)(x_before_downsampling)\n\n return x, x_before_downsampling\n\n\ndef decoder_block_3d(x, nr_of_convolutions, cross_over_connection=None, use_bn=False, spatial_dropout=None, renorm=False):\n\n upsample = [2, 2, 2]\n if cross_over_connection is not None:\n for i in range(1, 4):\n if cross_over_connection.shape[i] == x.shape[i]:\n upsample[i-1] = 1\n x = UpSampling3D(upsample)(x)\n if cross_over_connection is not None:\n x = Concatenate()([cross_over_connection, x])\n x = convolution_block_3d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n\n return x\n\n\ndef encoder_block(x, nr_of_convolutions, use_bn=False, spatial_dropout=None, dims=2, renorm=False):\n if dims == 2:\n return encoder_block_2d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n elif dims == 3:\n return encoder_block_3d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n else:\n raise ValueError\n\n\ndef decoder_block(x, nr_of_convolutions, cross_over_connection=None, use_bn=False, spatial_dropout=None, dims=2, 
renorm=False):\n if dims == 2:\n return decoder_block_2d(x, nr_of_convolutions, cross_over_connection, use_bn, spatial_dropout, renorm)\n elif dims == 3:\n return decoder_block_3d(x, nr_of_convolutions, cross_over_connection, use_bn, spatial_dropout, renorm)\n else:\n raise ValueError\n\n\ndef convolution_block(x, nr_of_convolutions, use_bn=False, spatial_dropout=None, dims=2, renorm=False):\n if dims == 2:\n return convolution_block_2d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n elif dims == 3:\n return convolution_block_3d(x, nr_of_convolutions, use_bn, spatial_dropout, renorm)\n else:\n raise ValueError\n\nclass VGGnet():\n def __init__(self, input_shape, nb_classes):\n if len(input_shape) != 3:\n raise ValueError('Input shape must have 3 dimensions')\n if nb_classes <= 1:\n raise ValueError('Classes must be > 1')\n self.nb_classes = nb_classes\n self.input_shape = input_shape\n self.convolutions = None\n self.use_bn = True\n self.spatial_dropout = None\n self.dense_dropout = 0.5\n self.dense_size = 1024\n\n def set_dense_size(self, size):\n self.dense_size = size\n\n def set_dense_dropout(self, dropout):\n self.dense_dropout = dropout\n\n def set_spatial_dropout(self, dropout):\n self.spatial_dropout = dropout\n\n def set_convolutions(self, convolutions):\n if len(convolutions) != self.get_depth():\n raise ValueError('Nr of convolutions must have length ' + str(self.get_depth()*2 + 1))\n self.convolutions = convolutions\n\n def get_depth(self):\n init_size = min(self.input_shape[0], self.input_shape[1])\n size = init_size\n depth = 0\n while size % 2 == 0 and size > 4:\n size /= 2\n depth += 1\n\n return depth + 1\n\n def create(self):\n \"\"\"\n Create model and return it\n\n :return: keras model\n \"\"\"\n\n input_layer = Input(shape=self.input_shape)\n x = input_layer\n\n init_size = min(self.input_shape[:-1])\n size = init_size\n\n convolutions = self.convolutions\n if convolutions is None:\n # Create convolutions\n convolutions = []\n nr_of_convolutions = 8\n for i in range(self.get_depth()):\n convolutions.append(nr_of_convolutions)\n nr_of_convolutions *= 2\n\n i = 0\n while size % 2 == 0 and size > 4:\n x, _ = encoder_block_2d(x, convolutions[i], self.use_bn, self.spatial_dropout)\n size /= 2\n i += 1\n\n x = convolution_block_2d(x, convolutions[i], self.use_bn, self.spatial_dropout)\n\n x = Flatten(name=\"flatten\")(x)\n x = Dense(self.dense_size, activation='relu')(x)\n if self.dense_dropout is not None:\n x = Dropout(self.dense_dropout)(x)\n x = Dense(self.dense_size, activation='relu')(x)\n if self.dense_dropout is not None:\n x = Dropout(self.dense_dropout)(x)\n x = Dense(self.nb_classes, activation='softmax')(x)\n\n return Model(inputs=input_layer, outputs=x, name='vgg')\n\n\nclass Unet():\n def __init__(self, input_shape, nb_classes):\n if len(input_shape) != 3 and len(input_shape) != 4:\n raise ValueError('Input shape must have 3 or 4 dimensions')\n if len(input_shape) == 3:\n self.dims = 2\n else:\n self.dims = 3\n if nb_classes <= 1:\n raise ValueError('Segmentation classes must be > 1')\n self.input_shape = input_shape\n self.nb_classes = nb_classes\n self.convolutions = None\n self.encoder_use_bn = True\n self.decoder_use_bn = True\n self.encoder_spatial_dropout = None\n self.decoder_spatial_dropout = None\n self.bottom_level = 4\n self.renorm = False\n\n def set_bn(self, use_bn):\n self.decoder_use_bn = use_bn\n self.encoder_use_bn = use_bn\n\n def set_renorm(self, renorm):\n self.renorm = renorm\n\n def set_convolutions(self, convolutions):\n #if 
len(convolutions) != self.get_depth()*2 + 1:\n # raise ValueError('Nr of convolutions must have length ' + str(self.get_depth()*2 + 1))\n self.convolutions = convolutions\n\n def get_depth(self):\n init_size = max(self.input_shape[:-1])\n size = init_size\n depth = 0\n while size % 2 == 0 and size > self.bottom_level:\n size /= 2\n size = np.ceil(size)\n depth += 1\n\n return depth\n\n def get_dice_loss(self, use_background=False):\n def dice_loss(target, output, epsilon=1e-10):\n smooth = 1.\n dice = 0\n\n for object in range(0 if use_background else 1, self.nb_classes):\n if self.dims == 2:\n output1 = output[:, :, :, object]\n target1 = target[:, :, :, object]\n shapes = (1, 2)\n else:\n output1 = output[:, :, :, :, object]\n target1 = target[:, :, :, :, object]\n shapes = (1, 2, 3)\n intersection1 = tf.reduce_sum(output1 * target1, axis=shapes)\n union1 = tf.reduce_sum(output1 * output1, axis=shapes) + tf.reduce_sum(target1 * target1, axis=shapes)\n dice += (2. * intersection1 + smooth) / (union1 + smooth)\n\n if use_background:\n dice /= self.nb_classes\n else:\n dice /= (self.nb_classes - 1)\n\n return tf.clip_by_value(1. - dice, 0., 1. - epsilon)\n\n return dice_loss\n\n def get_dice_metric(self, use_background=False):\n def dice(target, output, epsilon=1e-10):\n smooth = 0 # 1e-8 # 1.\n dice = 0\n\n # first threshold output volume\n output = tf.cast(tf.math.greater_equal(output, tf.constant([0.5])), tf.float32)\n\n for object in range(0 if use_background else 1, self.nb_classes):\n if self.dims == 2:\n output1 = output[:, :, :, object]\n target1 = target[:, :, :, object]\n shapes = (1, 2)\n else:\n output1 = output[:, :, :, :, object]\n target1 = target[:, :, :, :, object]\n shapes = (1, 2, 3)\n intersection1 = tf.reduce_sum(output1 * target1, axis=shapes)\n union1 = tf.reduce_sum(output1 * output1, axis=shapes) + tf.reduce_sum(target1 * target1, axis=shapes)\n dice += (2. * intersection1 + smooth) / (union1 + smooth)\n\n if use_background:\n dice /= self.nb_classes\n else:\n dice /= (self.nb_classes - 1)\n\n return dice # tf.clip_by_value(dice, 0., 1. 
- epsilon)\n\n        return dice\n\n\n    def create(self):\n        \"\"\"\n        Create model and return it\n\n        :return: keras model\n        \"\"\"\n\n        input_layer = Input(shape=self.input_shape)\n        x = input_layer\n\n        init_size = max(self.input_shape[:-1])\n        size = init_size\n\n        convolutions = self.convolutions\n        if convolutions is None:\n            # Create convolutions\n            convolutions = []\n            nr_of_convolutions = 8\n            for i in range(self.get_depth()):\n                convolutions.append(int(nr_of_convolutions))\n                nr_of_convolutions *= 2\n            convolutions.append(int(nr_of_convolutions))\n            for i in range(self.get_depth()):\n                convolutions.append(int(nr_of_convolutions))\n                nr_of_convolutions /= 2\n\n        connection = {}\n        i = 0\n        while size % 2 == 0 and size > self.bottom_level:\n            x, connection[size] = encoder_block(x, convolutions[i], self.encoder_use_bn, self.encoder_spatial_dropout, self.dims, self.renorm)\n            size /= 2\n            i += 1\n\n        x = convolution_block(x, convolutions[i], self.encoder_use_bn, self.encoder_spatial_dropout, self.dims, self.renorm)\n        i += 1\n\n        while size < init_size:\n            size *= 2\n            x = decoder_block(x, convolutions[i], connection[size], self.decoder_use_bn, self.decoder_spatial_dropout, self.dims, self.renorm)\n            i += 1\n\n        if self.dims == 2:\n            x = Convolution2D(self.nb_classes, 1, activation='softmax')(x)\n        else:\n            x = Convolution3D(self.nb_classes, 1, activation='softmax')(x)\n\n        return Model(inputs=input_layer, outputs=x)\n\n\nif __name__ == \"__main__\":\n    network = Unet(input_shape=(1024, 1024, 4), nb_classes=2)\n    network.encoder_spatial_dropout = None\n    network.decoder_spatial_dropout = 0.1\n    network.set_convolutions([8, 16, 32, 64, 128, 128, 256, 256, 512, 256, 256, 128, 128, 64, 32, 16, 8])\n    network.set_bn(True)\n    network.set_renorm(True)\n    model = network.create()\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.sign", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.MaxPooling2D", "tensorflow.python.keras.layers.Dense", "tensorflow.reduce_sum", "tensorflow.python.keras.layers.Convolution2D", "tensorflow.python.keras.layers.BatchNormalization", "tensorflow.python.keras.layers.Activation", "tensorflow.python.keras.layers.UpSampling2D", "numpy.ceil", "tensorflow.name_scope", "tensorflow.python.keras.layers.MaxPooling3D", "tensorflow.python.keras.layers.UpSampling3D", "tensorflow.python.keras.layers.Convolution3D", "tensorflow.python.keras.layers.Dropout", "tensorflow.python.keras.layers.Input", "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.python.keras.layers.SpatialDropout3D", "tensorflow.python.keras.layers.Concatenate", "tensorflow.python.keras.models.Model", "tensorflow.python.keras.layers.SpatialDropout2D", "tensorflow.abs" ] ]
dancerphil/crawler-collection
[ "d25c076922fb7c00c923cc6f50394dcd7560ac0e" ]
[ "cny-history/1.py" ]
[ "import pandas as pd\n\ndf = pd.read_csv('data.csv')\ndf = df[['date', 'USD/CNY']]\ndf.rename(columns={'USD/CNY': 'value'}, inplace=True)\ndf.to_json('usd.json', orient='records')\n" ]
[ [ "pandas.read_csv" ] ]
kevingodlike/char-rnn-tensorflow-master
[ "1bc17d3082eda82d779a4bfc1b44469575bceb43" ]
[ "utils.py" ]
[ "import codecs\nimport os\nimport collections\nfrom six.moves import cPickle\nimport numpy as np\n\n\nclass TextLoader():\n def __init__(self, data_dir, batch_size, seq_length, encoding='utf-8'):\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.encoding = encoding\n\n input_file = os.path.join(data_dir, \"input.txt\")\n vocab_file = os.path.join(data_dir, \"vocab.pkl\")\n tensor_file = os.path.join(data_dir, \"data.npy\")\n\n if not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):\n print(\"reading text file\")\n self.preprocess(input_file, vocab_file, tensor_file)\n else:\n print(\"loading preprocessed files\")\n self.load_preprocessed(vocab_file, tensor_file)\n self.create_batches()\n self.reset_batch_pointer()\n\n # preprocess data for the first time.\n def preprocess(self, input_file, vocab_file, tensor_file):\n with codecs.open(input_file, \"r\", encoding=self.encoding) as f:\n data = f.read()\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: -x[1])\n self.chars, _ = zip(*count_pairs)\n self.vocab_size = len(self.chars)\n self.vocab = dict(zip(self.chars, range(len(self.chars))))\n with open(vocab_file, 'wb') as f:\n cPickle.dump(self.chars, f)\n self.tensor = np.array(list(map(self.vocab.get, data)))\n np.save(tensor_file, self.tensor)\n\n\n # load the preprocessed the data if the data has been processed before.\n def load_preprocessed(self, vocab_file, tensor_file):\n with open(vocab_file, 'rb') as f:\n self.chars = cPickle.load(f)\n self.vocab_size = len(self.chars)\n self.vocab = dict(zip(self.chars, range(len(self.chars))))\n self.tensor = np.load(tensor_file)\n self.num_batches = int(self.tensor.size / (self.batch_size *\n self.seq_length))\n # seperate the whole data into different batches.\n def create_batches(self):\n self.num_batches = int(self.tensor.size / (self.batch_size *\n self.seq_length))\n\n # When the data (tensor) is too small,\n # let's give them a better error message\n if self.num_batches == 0:\n assert False, \"Not enough data. Make seq_length and batch_size small.\"\n\n # reshape the original data into the length self.num_batches * self.batch_size * self.seq_length for convenience.\n self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]\n xdata = self.tensor\n ydata = np.copy(self.tensor)\n\n #ydata is the xdata with one position shift.\n ydata[:-1] = xdata[1:]\n ydata[-1] = xdata[0]\n self.x_batches = np.split(xdata.reshape(self.batch_size, -1),\n self.num_batches, 1)\n self.y_batches = np.split(ydata.reshape(self.batch_size, -1),\n self.num_batches, 1)\n\n def next_batch(self):\n x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]\n self.pointer += 1\n return x, y\n\n def reset_batch_pointer(self):\n self.pointer = 0\n" ]
[ [ "numpy.load", "numpy.copy", "numpy.save" ] ]
PrincetonUniversity/blockchain-detect
[ "8a17256ac98db19cfe1fb2dcc213a11a1f9b2592" ]
[ "grid-search/preamble.py" ]
[ "\n\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport os, os.path\nfrom itertools import *\nimport math\nimport random\nimport scipy.stats\nimport sys\nimport random\nfrom joblib import Parallel, delayed\nimport multiprocessing\nnproc = max(1, multiprocessing.cpu_count())\nfrom scipy.sparse import csr_matrix\nimport scipy.sparse\nfrom sklearn import cross_validation, base\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.grid_search import ParameterGrid, GridSearchCV\nfrom scipy.sparse.linalg import svds\nfrom sklearn.decomposition import NMF\nimport networkx as nx\nimport scipy\nimport time\n \n# Warnings\n\nimport warnings\nwarnings.filterwarnings('ignore')\n \n# Extract data in df form and as a sparse matrix\n\ndftrain = pd.read_csv('../data/txTripletsCounts.txt',\n header=None,\n index_col=None,\n sep=' ',\n names=['sender','receiver','transaction'])\n\ndftest = pd.read_csv('../data/testTriplets.txt',\n header=None,\n index_col=None,\n sep=' ',\n names=['sender','receiver','transaction'])\n\ndim = max(df[c].max() for df in (dftrain, dftest) for c in ['sender', 'receiver'])\ndim += 1\n\n# both the matrices here have m[i, j] return the number of transactions from i to j\n# in training.\ntrain_csr = csr_matrix((dftrain['transaction'],(dftrain['sender'],dftrain['receiver'])),\n shape=(dim,dim),\n dtype=float)\n\ntrain_csc = train_csr.tocsc()\n\ndef maxlgbin(series): return math.ceil(np.log2(series.max()))\n\nbincsr = train_csr.sign()\nbincsc = train_csc.sign()\nlogcsr = train_csr.log1p()\nlogcsc = train_csc.log1p()\n\nprint('loaded data')\n\n# Create a simple CV scheme (relying on our time-stationarity assumption)\n# for evaluation.\n\nXtrain = dftrain[['sender', 'receiver']].values.astype('int')\nYtrain = dftrain['transaction'].values.astype('bool')\nXtest = dftest[['sender', 'receiver']].values.astype('int')\nYtest = dftest['transaction'].values.astype('bool')\niX = np.arange(len(Xtrain))\nn_ones = Ytest.sum()\nn_zeros = len(Ytest) - n_ones\n\nn_folds = 10\n\ncv = list(islice(cross_validation.KFold(\n len(Ytrain), n_folds=(len(Xtrain) // n_ones), shuffle=True, random_state=0), 0, n_folds))\n\ndef named_matrix(A):\n if A == 'counts': return train_csr\n elif A == 'binary': return bincsr\n elif A == 'log1p': return logcsr\n return None\n\n\ndef masked_matrix(A, iX):\n M = named_matrix(A)\n mask = np.ones(len(Xtrain), dtype='bool')\n mask[iX] = 0\n X = Xtrain[mask]\n vals = np.ones(len(X))\n X = X.transpose()\n disable = csr_matrix((vals, (X[0], X[1])), shape=M.shape) \n return M - M.multiply(disable)\n\ndef AUC(exact, pred):\n fpr, tpr, thresholds = roc_curve(exact, pred)\n return auc(fpr, tpr)\n\ndef sample_one():\n while True:\n i, j = (random.randint(0, dim - 1) for i in range(2))\n if i == j: continue\n if not bincsr[i, j]: return [i, j]\n\nclass CachedTrainingMatrixEstimator(base.BaseEstimator):\n def __init__(self):\n super(CachedTrainingMatrixEstimator, self).__init__()\n \n def score(self, iX, Y):\n assert abs(len(Y) - n_ones) <= 1\n assert np.all(Y > 0)\n assert len(iX) == len(Y)\n yes_train = Xtrain[iX]\n no_train = np.array([sample_one() for i in range(n_zeros)])\n pred_yes = self.predict(yes_train)\n pred_no = self.predict(no_train)\n exact_yes = np.ones(len(pred_yes), dtype='int')\n exact_no = np.zeros(len(pred_no), dtype='int')\n exact = np.concatenate((exact_yes, exact_no))\n pred = np.concatenate((pred_yes, pred_no))\n return AUC(exact, pred)\n\ndef cvAUC(estimator, grid, name, verbosity=0):\n print('Running grid search for {}'.format(name))\n gse = 
GridSearchCV(estimator, grid, n_jobs=nproc, verbose=verbosity, cv=cv, refit=False)\n t = time.time()\n gse.fit(iX, Ytrain)\n t = int(time.time() - t)\n print('\\tran cv grid with best AUC {:.4f} in {} s '.format(gse.best_score_, t))\n print('\\tmodel:', gse.best_params_)\n\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.roc_curve", "scipy.sparse.csr_matrix", "sklearn.grid_search.GridSearchCV", "numpy.all", "numpy.concatenate", "sklearn.metrics.auc" ] ]
facebookresearch/svinfer
[ "14edce1af6c91e622b8691f5d78a490a8585e7b5" ]
[ "svinfer/linear_model/linear_regression.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nimport numpy as np\nfrom ..processor.commons import AbstractProcessor\nfrom ..processor.matrix import get_result\n\n\n\nclass LinearRegressionCoefficients:\n \"\"\"\n Internal class to be used by LinearRegression\n You should not call this class directly.\n \"\"\"\n\n def __init__(self, n, xtx, xty, yty, x_s2, df_corrected=True):\n if xtx.shape[0] != xtx.shape[1]:\n raise ValueError(\"xtx should be square matrix!\")\n if xtx.shape[0] != xty.shape[0]:\n raise ValueError(\"xtx and xty have unmatched dimension\")\n if any(s2 < 0.0 for s2 in x_s2):\n raise ValueError(\"x_s2 should not contain negative elements\")\n self.n = n\n self.xtx = xtx\n self.xty = xty\n self.yty = yty\n self.x_s2 = x_s2\n self.df_corrected = df_corrected\n self.k = self.xtx.shape[0]\n self.beta = None\n self.sigma_sq = None\n self.omega = None\n\n def estimate_beta(self):\n self.omega = self.xtx.copy()\n self.omega[np.diag_indices(self.k)] = np.diag(self.omega) - self.x_s2\n\n # check whether omega is positive-definite or not\n # reference on threshold https://fburl.com/7y4wyaev\n eigenvalues = np.linalg.eigvals(self.omega)\n if eigenvalues.max() < 0 or (\n eigenvalues.min() < eigenvalues.max() * self.k * np.finfo(np.float_).eps\n ):\n logging.warning(\"omega is not positive definite\")\n self.beta = np.linalg.lstsq(self.omega, self.xty, rcond=None)[0]\n else:\n self.beta = np.linalg.solve(self.omega, self.xty)\n\n def estimate_residual_var(self):\n if self.yty is None:\n self.sigma_sq = None\n return\n df = self.n\n if self.df_corrected:\n # I think it should be df -= self.k\n # but use df -= self.k - 1 for now to be aligned with the R package\n df -= self.k - 1\n term1 = (\n self.yty\n - 2 * self.beta.T.dot(self.xty)\n + self.beta.T.dot(self.xtx).dot(self.beta)\n ) * self.n / df\n term2 = (self.beta ** 2 * self.x_s2).sum()\n sigma_sq = term1 - term2\n # check whether variance is positive or not\n if sigma_sq < 0.0:\n logging.warning(\"negative residual variance!\")\n self.sigma_sq = sigma_sq\n\n def estimate_all(self):\n self.estimate_beta()\n self.estimate_residual_var()\n return self.beta, self.sigma_sq, self.omega\n\n\nclass LinearRegressionVariance:\n \"\"\"\n Internal class to be used by LinearRegressionFit\n You should not call this class directly.\n \"\"\"\n\n def __init__(\n self,\n n,\n xtx,\n xty,\n yty,\n sigma_sq,\n omega,\n x_s2,\n n_replications=500,\n random_state=None,\n ):\n self.n = n\n self.xtx = xtx\n self.xty = xty\n self.yty = yty\n self.sigma_sq = sigma_sq\n self.omega = omega\n self.x_s2 = x_s2\n self.n_replications = n_replications\n self.random_state = random_state\n self.k = self.xtx.shape[0]\n\n def estimate_vcov_xx_xx(self, k, l, j, m):\n return (\n self.omega[k, l] * self.x_s2[j] * (j == m)\n + self.omega[k, m] * self.x_s2[j] * (j == l)\n + self.omega[j, l] * self.x_s2[k] * (k == m)\n + self.omega[j, m] * self.x_s2[k] * (k == l)\n + self.x_s2[k] 
* (k == l) * self.x_s2[j] * (j == m)\n            + self.x_s2[k] * (k == m) * self.x_s2[j] * (j == l)\n        ) / self.n\n\n    def estimate_vcov_xy_xy(self, k, j):\n        return (\n            self.sigma_sq * self.omega[k, j]\n            + self.x_s2[k] * (k == j) * self.yty\n        ) / self.n\n\n    def estimate_vcov_xy_xx(self, k, j, m):\n        return (\n            self.x_s2[k] * (k == m) * self.xty[j]\n            + self.x_s2[k] * (k == j) * self.xty[m]\n        ) / self.n\n\n    def simulate_distribution(self):\n        index = np.triu_indices(self.k)\n        t = np.concatenate((self.xtx[index], self.xty), axis=0)\n\n        d1, d2 = self.k * (self.k + 1) // 2, self.k\n        v_t = np.zeros((d1 + d2, d1 + d2))\n        for i1 in range(d1 + d2):\n            for i2 in range(i1 + 1):\n                if i1 < d1 and i2 < d1:\n                    k, j = index[0][i1], index[1][i1]\n                    l, m = index[0][i2], index[1][i2]\n                    v_t[i1, i2] = self.estimate_vcov_xx_xx(k, l, j, m)\n                elif i2 < d1 <= i1:\n                    k = i1 - d1\n                    j, m = index[0][i2], index[1][i2]\n                    v_t[i1, i2] = self.estimate_vcov_xy_xx(k, j, m)\n                else:\n                    k = i1 - d1\n                    j = i2 - d1\n                    v_t[i1, i2] = self.estimate_vcov_xy_xy(k, j)\n                v_t[i2, i1] = v_t[i1, i2]\n\n        return t, v_t\n\n    def transform_vector_to_matrix(self, sample):\n        k = self.k\n        d = k * (k + 1) // 2\n        xty = sample[d:]\n        xtx = np.zeros((k, k))\n        xtx[np.triu_indices(k)] = sample[:d]\n        i, j = np.triu_indices(k, 1)\n        xtx[(j, i)] = xtx[(i, j)]\n        return xtx, xty\n\n    def simulate_beta_vcov(self):\n        t, v_t = self.simulate_distribution()\n        np.random.seed(self.random_state)\n        t_samples = np.random.multivariate_normal(\n            t, v_t, self.n_replications, check_valid=\"ignore\"\n        )\n\n        simu_beta_list = []\n        for i in range(self.n_replications):\n            simu_xtx, simu_xty = self.transform_vector_to_matrix(t_samples[i, :])\n            simu_beta, _, _ = LinearRegressionCoefficients(\n                self.n, simu_xtx, simu_xty, None, self.x_s2\n            ).estimate_all()\n            simu_beta_list.append(simu_beta)\n\n        beta_vcov = np.cov(np.array(simu_beta_list), rowvar=False)\n        return beta_vcov\n\n\nclass LinearRegression:\n    def __init__(\n        self,\n        x_columns,\n        y_column,\n        x_s2,\n        fit_intercept=True,\n        df_corrected=True,\n        n_replications=500,\n        random_state=None,\n    ):\n        \"\"\"\n        LinearRegression fits a linear regression where some variables\n        in the training data are subject to naturally occurring measurement\n        error, and implements a computationally efficient algorithm for\n        statistical inference in large datasets.\n\n        :param x_columns: list of strings.\n            Specify the column names for the features in the data.\n        :param y_column: string\n            Specify the column name for the response in the data.\n        :param x_s2: list of doubles\n            Specify the variance of noise that was added to each of the features.\n            Its length should be equal to that of x_columns. If no noise is\n            added to a feature, use 0.0 in the corresponding place.\n        :param fit_intercept: bool, optional, default to True\n            Whether to include the intercept into the model. If set to False,\n            the design matrix will only consist of the features specified.\n            If set to True, a column of 1s will be added to the design matrix\n            besides features specified.\n        :param df_corrected: bool, optional, default to True\n            Whether to adjust the degrees of freedom when estimating the error\n            variance. If set to False, the degrees of freedom are n, where n is\n            the sample size. If set to True, the degrees of freedom are n - p,\n            where p is the number of columns in the design matrix.\n        :param n_replications: int, optional, default to 500\n            The number of simulation replicates. 
It should be a positive integer.\n :param random_state: int or None, optional, default to None\n Determines random number generation for dataset creation. Pass an\n int for reproducible output across multiple function calls.\n \"\"\"\n self.x_columns = x_columns\n self.y_column = y_column\n self.x_s2 = ([0.0] if fit_intercept else []) + x_s2\n self.fit_intercept = fit_intercept\n self.df_corrected = df_corrected\n self.n_replications = n_replications\n self.random_state = random_state\n\n self.beta = None\n self.sigma_sq = None\n self.beta_vcov = None\n self.beta_standarderror = None\n\n def _preprocess_data(self, data: AbstractProcessor):\n logging.info(\"data is an instance of %s\", type(data))\n x, y = data.prepare_xy(self.x_columns, self.y_column, self.fit_intercept)\n z = get_result({\n \"xtx\": x.cross(x),\n \"xty\": x.cross(y),\n \"yty\": y.cross(y),\n }, data.run_query)\n n = z[\"sample_size\"]\n xtx = z[\"xtx\"]\n xty = z[\"xty\"][:, 0]\n yty = z[\"yty\"][0, 0]\n return n, xtx, xty, yty\n\n def fit(self, data):\n \"\"\"\n fit model\n :return: a tuple; the 1st element is the estimated beta,\n the 2nd element is the estimated sigma squared, the 3rd element is\n the estimated variance-covariance matrix of the estimated beta.\n \"\"\"\n n, xtx, xty, yty = self._preprocess_data(data)\n self.beta, self.sigma_sq, omega = LinearRegressionCoefficients(\n n, xtx, xty, yty, self.x_s2, df_corrected=self.df_corrected\n ).estimate_all()\n\n self.beta_vcov = LinearRegressionVariance(\n n,\n xtx,\n xty,\n yty,\n self.sigma_sq,\n omega,\n self.x_s2,\n n_replications=self.n_replications,\n random_state=self.random_state,\n ).simulate_beta_vcov()\n\n self.beta_standarderror = np.sqrt(np.diag(self.beta_vcov))\n return self\n" ]
[ [ "numpy.diag", "numpy.linalg.eigvals", "numpy.linalg.solve", "numpy.random.seed", "numpy.triu_indices", "numpy.random.multivariate_normal", "numpy.finfo", "numpy.concatenate", "numpy.linalg.lstsq", "numpy.diag_indices", "numpy.array", "numpy.zeros" ] ]
facebookresearch/robust_mobo
[ "39195f5796e3dd19c411d4cd4c8252488ecbd0ec" ]
[ "robust_mobo/nsgaii_utils.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nr\"\"\"\nNSGA-II utilities\n\"\"\"\nfrom typing import Dict\nfrom torch import Tensor\nimport torch\nimport numpy as np\nfrom pymoo.core.problem import Problem\nfrom pymoo.core.population import Population\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom botorch.test_functions.base import BaseTestProblem, ConstrainedBaseTestProblem\nfrom pymoo.util.termination.no_termination import NoTermination\n\n\ndef get_nsgaii(X_init: Tensor, Y_init: Tensor, base_function: BaseTestProblem) -> NSGA2:\n is_constrained = isinstance(base_function, ConstrainedBaseTestProblem)\n pymoo_problem = Problem(\n n_var=base_function.dim,\n n_obj=base_function.num_objectives,\n n_constr=base_function.num_constraints if is_constrained else 0,\n xl=np.zeros(base_function.dim),\n xu=np.ones(base_function.dim),\n )\n pop = Population.new(\"X\", X_init.cpu().numpy())\n # pymoo minimizes objectives\n pop.set(\"F\", -Y_init[:, : base_function.num_objectives].cpu().numpy())\n if is_constrained:\n # negative constraint slack implies feasibility\n pop.set(\"G\", -Y_init[:, base_function.num_objectives :].cpu().numpy())\n algorithm = NSGA2(pop_size=10, sampling=pop)\n # let the algorithm object never terminate and let the loop control it\n termination = NoTermination()\n\n # create an algorithm object that never terminates\n algorithm.setup(pymoo_problem, termination=termination, seed=0)\n\n return algorithm\n" ]
[ [ "numpy.zeros", "numpy.ones" ] ]
Ekko-zn/StegoAdv
[ "2852dbc85d66f30efb7127695c0d75806bf4aa4c" ]
[ "hpfloss.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport os\r\nfrom cfg import *\r\nimport sys\r\nimg_size = 256\r\ncriterionl1 = torch.nn.L1Loss()\r\nSRM_npy = np.load('SRM_Kernels.npy')\r\nclass HILL(nn.Module):\r\n def __init__(self,img_size):\r\n super(HILL,self).__init__()\r\n self.img_size = img_size\r\n self.pad_3 = nn.ReplicationPad2d(3)\r\n self.pad = nn.ReplicationPad2d(7)\r\n self.conv1 = nn.Conv2d(1,1,3,1,padding=1,bias=False)\r\n self.avepool1 = nn.AvgPool2d(3,stride = 1,padding=1)\r\n self.avepool2 = nn.AvgPool2d(15,stride = 1)\r\n self.eps = 1e-10\r\n self.res()\r\n def res(self):\r\n self.conv1.weight.data = torch.tensor([[-1,2,-1],[2,-4,2],[-1,2,-1]],dtype = torch.float).view(1,1,3,3)\r\n def forward(self, x):\r\n t1 = self.pad_3(x)\r\n t2 = self.conv1(t1)\r\n t3 = self.avepool1(torch.abs(t2))\r\n t4 = 1 / (t3[:,:,3:self.img_size+3,3:self.img_size+3]+self.eps)\r\n t5 = self.avepool2(self.pad(t4))\r\n return t5\r\n\r\nclass HPF_SRM(nn.Module):\r\n def __init__(self):\r\n super(HPF_SRM, self).__init__()\r\n self.SRM = torch.nn.Conv2d(1, 30, 5, 1, 0, bias=False)\r\n self.Weight = torch.from_numpy(SRM_npy)\r\n self.SRM.weight.data = self.Weight\r\n self.SRM.weight.requires_grad = False\r\n self.Padding = torch.nn.ReplicationPad2d(2)\r\n def forward(self, input):\r\n t1 = self.SRM(self.Padding(input))\r\n return t1.permute(0, 1, 2, 3)\r\n\r\nHILLCOST = HILL(img_size=img_size).to(device)\r\nHPF = HPF_SRM().to(device)\r\n\r\ndef HPFLOSS(img,target):\r\n img = img.view(-1,1,img_size,img_size)\r\n target = target.view(-1,1,img_size,img_size)\r\n HPFimg = HPF(img).view(-1,1,img_size,img_size)\r\n HPFtarget = HPF(target).detach().view(-1,1,img_size,img_size)\r\n cost = HILLCOST(HPFtarget)\r\n cost[cost > 1] = 1\r\n cost = cost.detach()\r\n loss = criterionl1(cost * HPFimg, cost * HPFtarget)\r\n return loss\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n t1 = torch.rand(2,3,256,256)\r\n t1.requires_grad = True\r\n t2 = torch.rand(2,3,256,256)\r\n m = HPFLOSS(t1,t2)\r\n m.backward()\r\n pass" ]
[ [ "torch.abs", "torch.nn.Conv2d", "torch.from_numpy", "torch.tensor", "torch.nn.AvgPool2d", "torch.rand", "numpy.load", "torch.nn.L1Loss", "torch.nn.ReplicationPad2d" ] ]
Khurramjaved96/Dicta
[ "416638a3d1ad851b00394e55a7574ec978080d51" ]
[ "train_model.py" ]
[ "''' Document Localization using Recursive CNN\n Maintainer : Khurram Javed\n Email : [email protected] '''\n\nfrom __future__ import print_function\n\nimport argparse\n\nimport torch\nimport torch.utils.data as td\n\nimport dataprocessor\nimport experiment as ex\nimport model\nimport trainer\nimport utils\n\nparser = argparse.ArgumentParser(description='Recursive-CNNs')\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\nparser.add_argument('--lr', type=float, default=0.005, metavar='LR',\n help='learning rate (default: 0.005)')\nparser.add_argument('--schedule', type=int, nargs='+', default=[10, 20, 30],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.2, 0.2, 0.2],\n help='LR is multiplied by gamma[k] on schedule[k], number of gammas should be equal to schedule')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default: 0.9)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--pretrain', action='store_true', default=False,\n help='Pretrain the model on CIFAR dataset?')\nparser.add_argument('--load-ram', action='store_true', default=False,\n help='Load data in ram: TODO : Remove this')\nparser.add_argument('--debug', action='store_true', default=True,\n help='Debug messages')\nparser.add_argument('--seed', type=int, default=2323,\n help='Seeds values to be used')\nparser.add_argument('--log-interval', type=int, default=5, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--model-type', default=\"resnet\",\n help='model type to be used. 
Example : resnet32, resnet20, densenet, test')\nparser.add_argument('--name', default=\"noname\",\n                    help='Name of the experiment')\nparser.add_argument('--output-dir', default=\"../\",\n                    help='Directory to store the results; a new folder \"DDMMYYYY\" will be created '\n                         'in the specified directory to save the results.')\nparser.add_argument('--decay', type=float, default=0.00001, help='Weight decay (L2 penalty).')\nparser.add_argument('--epochs', type=int, default=40, help='Number of epochs for training')\nparser.add_argument('--dataset', default=\"document\", help='Dataset to be used; example document, corner')\nparser.add_argument('--loader', default=\"hdd\", \n                    help='Loader to load data; hdd for reading from the hdd and ram for loading all data in the memory')\nparser.add_argument(\"-i\", \"--data-dirs\", nargs='+', default=\"/Users/khurramjaved96/documentTest64\",\n                    help=\"input Directory of train data\")\nparser.add_argument(\"-v\", \"--validation-dirs\", nargs='+', default=\"/Users/khurramjaved96/documentTest64\",\n                    help=\"input Directory of val data\")\n\nargs = parser.parse_args()\n\n# Define an experiment.\nmy_experiment = ex.experiment(args.name, args, args.output_dir)\n\n# Add logging support\nlogger = utils.utils.setup_logger(my_experiment.path)\n\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ndataset = dataprocessor.DatasetFactory.get_dataset(args.data_dirs, args.dataset)\n\ndataset_val = dataprocessor.DatasetFactory.get_dataset(args.validation_dirs, args.dataset)\n\n# Fix the seed.\nseed = args.seed\ntorch.manual_seed(seed)\nif args.cuda:\n    torch.cuda.manual_seed(seed)\n\ntrain_dataset_loader = dataprocessor.LoaderFactory.get_loader(args.loader, dataset.myData,\n                                                              transform=dataset.train_transform,\n                                                              cuda=args.cuda)\n# Loader used for validation data\nval_dataset_loader = dataprocessor.LoaderFactory.get_loader(args.loader, dataset_val.myData,\n                                                            transform=dataset.test_transform,\n                                                            cuda=args.cuda)\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\n# Iterator to iterate over training data.\ntrain_iterator = torch.utils.data.DataLoader(train_dataset_loader,\n                                             batch_size=args.batch_size, shuffle=True, **kwargs)\n# Iterator to iterate over validation data.\nval_iterator = torch.utils.data.DataLoader(val_dataset_loader,\n                                           batch_size=args.batch_size, shuffle=True, **kwargs)\n\n# Get the required model\nmyModel = model.ModelFactory.get_model(args.model_type, args.dataset)\nif args.cuda:\n    myModel.cuda()\n\n# Should I pretrain the model on CIFAR?\nif args.pretrain:\n    trainset = dataprocessor.DatasetFactory.get_dataset(None, \"CIFAR\")\n    train_iterator_cifar = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=2)\n\n    # Define the optimizer used in the experiment\n    cifar_optimizer = torch.optim.SGD(myModel.parameters(), args.lr, momentum=args.momentum,\n                                      weight_decay=args.decay, nesterov=True)\n\n    # Trainer object used for training\n    cifar_trainer = trainer.CIFARTrainer(train_iterator_cifar, myModel, args.cuda, cifar_optimizer)\n\n    for epoch in range(0, 70):\n        logger.info(\"Epoch : %d\", epoch)\n        cifar_trainer.update_lr(epoch, [30, 45, 60], args.gammas)\n        cifar_trainer.train(epoch)\n\n    # Freeze the model\n    counter = 0\n    for name, param in myModel.named_parameters():\n        # Getting the length of total layers so I can freeze x% of layers\n        gen_len = sum(1 for _ in myModel.parameters())\n        if counter < int(gen_len * 0.5):\n            param.requires_grad = False\n            logger.warning(name)\n        else:\n            logger.info(name)\n        counter += 1\n\n# Define the 
optimizer used in the experiment\noptimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, myModel.parameters()), args.lr,\n                            momentum=args.momentum,\n                            weight_decay=args.decay, nesterov=True)\n\n# Trainer object used for training\nmy_trainer = trainer.Trainer(train_iterator, myModel, args.cuda, optimizer)\n\n# Evaluator\nmy_eval = trainer.EvaluatorFactory.get_evaluator(\"rmse\", args.cuda)\n# Run training for args.epochs epochs\nfor epoch in range(0, args.epochs):\n    logger.info(\"Epoch : %d\", epoch)\n    my_trainer.update_lr(epoch, args.schedule, args.gammas)\n    my_trainer.train(epoch)\n    my_eval.evaluate(my_trainer.model, val_iterator)\n\ntorch.save(myModel.state_dict(), my_experiment.path + args.dataset + \"_\" + args.model_type + \".pb\")\nmy_experiment.store_json()\n" ]
[ [ "torch.manual_seed", "torch.utils.data.DataLoader", "torch.cuda.manual_seed", "torch.cuda.is_available" ] ]